/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 *  The Cyclic Subsystem
 *  --------------------
 *
 *  Prehistory
 *
 *  Historically, most computer architectures have specified interval-based
 *  timer parts (e.g. SPARCstation's counter/timer; Intel's i8254).  While
 *  these parts deal in relative (i.e. not absolute) time values, they are
 *  typically used by the operating system to implement the abstraction of
 *  absolute time.  As a result, these parts cannot typically be reprogrammed
 *  without introducing error in the system's notion of time.
 *
 *  Starting in about 1994, chip architectures began specifying high resolution
 *  timestamp registers.  As of this writing (1999), all major chip families
 *  (UltraSPARC, PentiumPro, MIPS, PowerPC, Alpha) have high resolution
 *  timestamp registers, and two (UltraSPARC and MIPS) have added the capacity
 *  to interrupt based on timestamp values.  These timestamp-compare registers
 *  present a time-based interrupt source which can be reprogrammed arbitrarily
 *  often without introducing error.  Given the low cost of implementing such a
 *  timestamp-compare register (and the tangible benefit of eliminating
 *  discrete timer parts), it is reasonable to expect that future chip
 *  architectures will adopt this feature.
 *
 *  The cyclic subsystem has been designed to take advantage of chip
 *  architectures with the capacity to interrupt based on absolute, high
 *  resolution values of time.
 *
 *  Subsystem Overview
 *
 *  The cyclic subsystem is a low-level kernel subsystem designed to provide
 *  arbitrarily high resolution, per-CPU interval timers (to avoid colliding
 *  with existing terms, we dub such an interval timer a "cyclic").  Cyclics
 *  can be specified to fire at high, lock or low interrupt level, and may be
 *  optionally bound to a CPU or a CPU partition.  A cyclic's CPU or CPU
 *  partition binding may be changed dynamically; the cyclic will be "juggled"
 *  to a CPU which satisfies the new binding.  Alternatively, a cyclic may
 *  be specified to be "omnipresent", denoting firing on all online CPUs.
 *
 *  Cyclic Subsystem Interface Overview
 *  -----------------------------------
 *
 *  The cyclic subsystem has interfaces with the kernel at-large, with other
 *  kernel subsystems (e.g. the processor management subsystem, the checkpoint
 *  resume subsystem) and with the platform (the cyclic backend).  Each
 *  of these interfaces is given a brief synopsis here, and is described
 *  in full above the interface's implementation.
 *
 *  The following diagram displays the cyclic subsystem's interfaces to
 *  other kernel components.  The arrows denote a "calls" relationship, with
 *  the large arrow indicating the cyclic subsystem's consumer interface.
 *  Each arrow is labeled with the section in which the corresponding
 *  interface is described.
 *
 *           Kernel at-large consumers
 *           -----------++------------
 *                      ||
 *                      ||
 *                     _||_
 *                     \  /
 *                      \/
 *            +---------------------+
 *            |                     |
 *            |  Cyclic subsystem   |<-----------  Other kernel subsystems
 *            |                     |
 *            +---------------------+
 *                   ^       |
 *                   |       |
 *                   |       |
 *                   |       v
 *            +---------------------+
 *            |                     |
 *            |   Cyclic backend    |
 *            | (platform specific) |
 *            |                     |
 *            +---------------------+
 *
 *
 *  Kernel At-Large Interfaces
 *
 *      cyclic_add()         <-- Creates a cyclic
 *      cyclic_add_omni()    <-- Creates an omnipresent cyclic
 *      cyclic_remove()      <-- Removes a cyclic
 *      cyclic_bind()        <-- Change a cyclic's CPU or partition binding
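 *
 *  To give a concrete flavor of the consumer interface, a kernel-at-large
 *  consumer might create a cyclic firing every 10 milliseconds like this
 *  (a sketch; my_func and my_arg are hypothetical, and callers of
 *  cyclic_add() must hold cpu_lock -- see "Resizing" and "Removals",
 *  below):
 *
 *      cyc_handler_t hdlr;
 *      cyc_time_t when;
 *      cyclic_id_t id;
 *
 *      hdlr.cyh_func = my_func;
 *      hdlr.cyh_arg = my_arg;
 *      hdlr.cyh_level = CY_LOW_LEVEL;
 *
 *      when.cyt_when = 0;             <-- fire on the next interval boundary
 *      when.cyt_interval = 10000000;  <-- and every 10,000,000 ns thereafter
 *
 *      mutex_enter(&cpu_lock);
 *      id = cyclic_add(&hdlr, &when);
 *      mutex_exit(&cpu_lock);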
 *
 *  Inter-subsystem Interfaces
 *
 *      cyclic_juggle()      <-- Juggles cyclics away from a CPU
 *      cyclic_offline()     <-- Offlines cyclic operation on a CPU
 *      cyclic_online()      <-- Reenables operation on an offlined CPU
 *      cyclic_move_in()     <-- Notifies subsystem of change in CPU partition
 *      cyclic_move_out()    <-- Notifies subsystem of change in CPU partition
 *      cyclic_suspend()     <-- Suspends the cyclic subsystem on all CPUs
 *      cyclic_resume()      <-- Resumes the cyclic subsystem on all CPUs
 *
 *  Backend Interfaces
 *
 *      cyclic_init()        <-- Initializes the cyclic subsystem
 *      cyclic_fire()        <-- CY_HIGH_LEVEL interrupt entry point
 *      cyclic_softint()     <-- CY_LOCK/LOW_LEVEL soft interrupt entry point
 *
 *  The backend-supplied interfaces (through the cyc_backend structure) are
 *  documented in detail in <sys/cyclic_impl.h>.
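 *
 *  To give a flavor of that interface, the backend operations exercised in
 *  this file include (a partial sketch; <sys/cyclic_impl.h> is the
 *  authoritative reference):
 *
 *      be->cyb_reprogram(be->cyb_arg, exp);  <-- Request a cyclic_fire() at
 *                                                absolute time exp
 *      be->cyb_softint(be->cyb_arg, level);  <-- Post a soft interrupt at
 *                                                the specified level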
 *
 *
 *  Cyclic Subsystem Implementation Overview
 *  ----------------------------------------
 *
 *  The cyclic subsystem is designed to minimize interference between cyclics
 *  on different CPUs.  Thus, all of the cyclic subsystem's data structures
 *  hang off of a per-CPU structure, cyc_cpu.
 *
 *  Each cyc_cpu has a power-of-two sized array of cyclic structures (the
 *  cyp_cyclics member of the cyc_cpu structure).  If cyclic_add() is called
 *  and there does not exist a free slot in the cyp_cyclics array, the size of
 *  the array will be doubled.  The array will never shrink.  Cyclics are
 *  referred to by their index in the cyp_cyclics array, which is of type
 *  cyc_index_t.
 *
 *  The cyclics are kept sorted by expiration time in the cyc_cpu's heap.  The
 *  heap is keyed by cyclic expiration time, with parents expiring earlier
 *  than their children.
 *
 *  Heap Management
 *
 *  The heap is managed primarily by cyclic_fire().  Upon entry, cyclic_fire()
 *  compares the root cyclic's expiration time to the current time.  If the
 *  expiration time is in the past, cyclic_expire() is called on the root
 *  cyclic.  Upon return from cyclic_expire(), the cyclic's new expiration time
 *  is derived by adding its interval to its old expiration time, and a
 *  downheap operation is performed.  After the downheap, cyclic_fire()
 *  examines the (potentially changed) root cyclic, repeating the
 *  cyclic_expire()/add interval/cyclic_downheap() sequence until the root
 *  cyclic has an expiration time in the future.  This expiration time
 *  (guaranteed to be the earliest in the heap) is then communicated to the
 *  backend via cyb_reprogram.  Optimal backends will next call cyclic_fire()
 *  shortly after the root cyclic's expiration time.
 *
 *  To allow efficient, deterministic downheap operations, we implement the
 *  heap as an array (the cyp_heap member of the cyc_cpu structure), with each
 *  element containing an index into the CPU's cyp_cyclics array.
 *
 *  The heap is laid out in the array according to the following:
 *
 *   1.  The root of the heap is always in the 0th element of the heap array
 *   2.  The left and right children of the nth element are element
 *       (((n + 1) << 1) - 1) and element ((n + 1) << 1), respectively.
 *
 *  This layout is standard (see, e.g., Cormen's "Algorithms"); the proof
 *  that these constraints correctly lay out a heap (or indeed, any binary
 *  tree) is trivial and left to the reader.
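 *
 *  These rules correspond to the index arithmetic used throughout this
 *  file; the macros below are equivalent to the definitions in
 *  <sys/cyclic_impl.h> (rule (2) gives CYC_HEAP_LEFT and CYC_HEAP_RIGHT
 *  directly, and CYC_HEAP_PARENT is their inverse):
 *
 *      #define CYC_HEAP_PARENT(ndx)   (((ndx) - 1) >> 1)
 *      #define CYC_HEAP_LEFT(ndx)     ((((ndx) + 1) << 1) - 1)
 *      #define CYC_HEAP_RIGHT(ndx)    (((ndx) + 1) << 1)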
 *
 *  To see the heap by example, assume our cyclics array has the following
 *  members (at time t):
 *
 *            cy_handler            cy_level      cy_expire
 *            ---------------------------------------------
 *     [ 0]   clock()                   LOCK     t+10000000
 *     [ 1]   deadman()                 HIGH   t+1000000000
 *     [ 2]   clock_highres_fire()       LOW          t+100
 *     [ 3]   clock_highres_fire()       LOW         t+1000
 *     [ 4]   clock_highres_fire()       LOW          t+500
 *     [ 5]   (free)                      --             --
 *     [ 6]   (free)                      --             --
 *     [ 7]   (free)                      --             --
 *
 *  The heap array could be:
 *
 *                [0]   [1]   [2]   [3]   [4]   [5]   [6]   [7]
 *              +-----+-----+-----+-----+-----+-----+-----+-----+
 *              |     |     |     |     |     |     |     |     |
 *              |  2  |  3  |  4  |  0  |  1  |  x  |  x  |  x  |
 *              |     |     |     |     |     |     |     |     |
 *              +-----+-----+-----+-----+-----+-----+-----+-----+
 *
 *  Graphically, this array corresponds to the following (excuse the ASCII art):
 *
 *                                       2
 *                                       |
 *                    +------------------+------------------+
 *                    3                                     4
 *                    |
 *          +---------+--------+
 *          0                  1
 *
 *  Note that the heap is laid out by layer:  all nodes at a given depth are
 *  stored in consecutive elements of the array.  Moreover, layers of
 *  consecutive depths are in adjacent element ranges.  This property
 *  guarantees high locality of reference during downheap operations.
 *  Specifically, we are guaranteed that we can downheap to a depth of
 *
 *      lg (cache_line_size / sizeof (cyc_index_t))
 *
 *  nodes with at most one cache miss.  On UltraSPARC (64 byte e-cache line
 *  size), this corresponds to a depth of four nodes:  sixteen cyc_index_t
 *  entries fit on one e-cache line, and lg (16) = 4.  Thus, if there are
 *  fewer than sixteen cyclics in the heap, downheaps on UltraSPARC miss at
 *  most once in the e-cache.
 *
 *  Downheaps are required to compare siblings as they proceed down the
 *  heap.  For downheaps proceeding beyond the one-cache-miss depth, every
 *  access to a left child could potentially miss in the cache.  However,
 *  if we assume
 *
 *      (cache_line_size / sizeof (cyc_index_t)) > 2,
 *
 *  then all siblings are guaranteed to be on the same cache line.  Thus, the
 *  miss on the left child will guarantee a hit on the right child; downheaps
 *  will incur at most one cache miss per layer beyond the one-cache-miss
 *  depth.  The total number of cache misses for heap management during a
 *  downheap operation is thus bounded by
 *
 *      lg (n) - lg (cache_line_size / sizeof (cyc_index_t))
 *
 *  Traditional pointer-based heaps are implemented without regard to
 *  locality.  Downheaps can thus incur two cache misses per layer (one for
 *  each child), but at most one cache miss at the root.  This yields a bound
 *  of
 *
 *      2 * lg (n) - 1
 *
 *  on the total cache misses.
 *
 *  This difference may seem theoretically trivial (the difference is, after
 *  all, constant), but can become substantial in practice -- especially for
 *  caches with very large cache lines and high miss penalties (e.g. TLBs).
 *
 *  Heaps must always be full, balanced trees.  Heap management must therefore
 *  track the next point-of-insertion into the heap.  In pointer-based heaps,
 *  recomputing this point takes O(lg (n)).  Given the layout of the
 *  array-based implementation, however, the next point-of-insertion is
 *  always:
 *
 *      heap[number_of_elements]
 *
 *  We exploit this property by implementing the free-list in the unused
 *  heap elements.  Heap insertion, therefore, consists only of filling in
 *  the cyclic at cyp_cyclics[cyp_heap[number_of_elements]], incrementing
 *  the number of elements, and performing an upheap.  Heap deletion consists
 *  of decrementing the number of elements, swapping the to-be-deleted element
 *  with the element at cyp_heap[number_of_elements], and downheaping.
 *
 *  Filling in more details in our earlier example:
 *
 *                                               +--- free list head
 *                                               |
 *                                               V
 *
 *                [0]   [1]   [2]   [3]   [4]   [5]   [6]   [7]
 *              +-----+-----+-----+-----+-----+-----+-----+-----+
 *              |     |     |     |     |     |     |     |     |
 *              |  2  |  3  |  4  |  0  |  1  |  5  |  6  |  7  |
 *              |     |     |     |     |     |     |     |     |
 *              +-----+-----+-----+-----+-----+-----+-----+-----+
 *
 *  To insert into this heap, we would just need to fill in the cyclic at
 *  cyp_cyclics[5], bump the number of elements (from 5 to 6) and perform
 *  an upheap.
 *
 *  If we wanted to remove, say, cyp_cyclics[3], we would first scan for it
 *  in the cyp_heap, and discover it at cyp_heap[1].  We would then decrement
 *  the number of elements (from 5 to 4), swap cyp_heap[1] with cyp_heap[4],
 *  and perform a downheap from cyp_heap[1].  The linear scan is required
 *  because the cyclic does not keep a backpointer into the heap.  This makes
 *  heap manipulation (e.g. downheaps) faster at the expense of removal
 *  operations.
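 *
 *  In pseudocode, the insertion and removal paths described above reduce
 *  to the following (nelems abbreviates the CPU's element count,
 *  cyp_nelems):
 *
 *      To insert:
 *          ndx = heap[nelems];         <-- free slot, by the layout above
 *          ... fill in cyp_cyclics[ndx] ...
 *          nelems++;
 *          cyclic_upheap(cpu, nelems - 1);
 *
 *      To remove the element at heap index i:
 *          nelems--;
 *          swap heap[i] with heap[nelems];
 *          cyclic_downheap(cpu, i);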
 *
 *  Expiry processing
 *
 *  As alluded to above, cyclic_expire() is called by cyclic_fire() at
 *  CY_HIGH_LEVEL to expire a cyclic.  Cyclic subsystem consumers are
 *  guaranteed that for an arbitrary time t in the future, their cyclic
 *  handler will have been called (t - cyt_when) / cyt_interval times.  Thus,
 *  there must be a one-to-one mapping between a cyclic's expiration at
 *  CY_HIGH_LEVEL and its execution at the desired level (either CY_HIGH_LEVEL,
 *  CY_LOCK_LEVEL or CY_LOW_LEVEL).
 *
 *  For CY_HIGH_LEVEL cyclics, this is trivial; cyclic_expire() simply needs
 *  to call the handler.
 *
 *  For CY_LOCK_LEVEL and CY_LOW_LEVEL cyclics, however, there exists a
 *  potential disconnect:  if the CPU is at an interrupt level less than
 *  CY_HIGH_LEVEL but greater than the level of a cyclic for a period of
 *  time longer than twice the cyclic's interval, the cyclic will be expired
 *  twice before it can be handled.
 *
 *  To maintain the one-to-one mapping, we track the difference between the
 *  number of times a cyclic has been expired and the number of times it's
 *  been handled in a "pending count" (the cy_pend field of the cyclic
 *  structure).  cyclic_expire() thus increments the cy_pend count for the
 *  expired cyclic and posts a soft interrupt at the desired level.  In the
 *  cyclic subsystem's soft interrupt handler, cyclic_softint(), we repeatedly
 *  call the cyclic handler and decrement cy_pend until we have decremented
 *  cy_pend to zero.
 *
 *  The Producer/Consumer Buffer
 *
 *  If we wish to avoid a linear scan of the cyclics array at soft interrupt
 *  level, cyclic_softint() must be able to quickly determine which cyclics
 *  have a non-zero cy_pend count.  We thus introduce a per-soft interrupt
 *  level producer/consumer buffer shared with CY_HIGH_LEVEL.  These buffers
 *  are encapsulated in the cyc_pcbuffer structure, and, like cyp_heap, are
 *  implemented as cyc_index_t arrays (the cypc_buf member of the cyc_pcbuffer
 *  structure).
 *
 *  The producer (cyclic_expire() running at CY_HIGH_LEVEL) enqueues a cyclic
 *  by storing the cyclic's index to cypc_buf[cypc_prodndx] and incrementing
 *  cypc_prodndx.  The consumer (cyclic_softint() running at either
 *  CY_LOCK_LEVEL or CY_LOW_LEVEL) dequeues a cyclic by loading from
 *  cypc_buf[cypc_consndx] and bumping cypc_consndx.  The buffer is empty when
 *  cypc_prodndx == cypc_consndx.
 *
 *  To bound the size of the producer/consumer buffer, cyclic_expire() only
 *  enqueues a cyclic if its cy_pend was zero (if the cyclic's cy_pend is
 *  non-zero, cyclic_expire() only bumps cy_pend).  Symmetrically,
 *  cyclic_softint() only consumes a cyclic after it has decremented the
 *  cy_pend count to zero.
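 *
 *  In code, the produce and consume operations reduce to the following
 *  (a sketch; cypc_sizemask is the buffer size minus one, so the indices
 *  increase monotonically and are masked only when indexing into cypc_buf):
 *
 *      Produce (cyclic_expire(), at CY_HIGH_LEVEL):
 *          pc->cypc_buf[pc->cypc_prodndx++ & pc->cypc_sizemask] = ndx;
 *
 *      Consume (cyclic_softint(), once cy_pend reaches zero):
 *          ndx = pc->cypc_buf[pc->cypc_consndx & pc->cypc_sizemask];
 *          ...
 *          pc->cypc_consndx++;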
 *
 *  Returning to our example, here is what the CY_LOW_LEVEL producer/consumer
 *  buffer might look like:
 *
 *     cypc_consndx ---+                 +--- cypc_prodndx
 *                     |                 |
 *                     V                 V
 *
 *        [0]   [1]   [2]   [3]   [4]   [5]   [6]   [7]
 *      +-----+-----+-----+-----+-----+-----+-----+-----+
 *      |     |     |     |     |     |     |     |     |
 *      |  x  |  x  |  3  |  2  |  4  |  x  |  x  |  x  |   <== cypc_buf
 *      |     |     |  .  |  .  |  .  |     |     |     |
 *      +-----+-----+- | -+- | -+- | -+-----+-----+-----+
 *                     |     |     |
 *                     |     |     |              cy_pend  cy_handler
 *                     |     |     |          -------------------------
 *                     |     |     |          [ 0]      1  clock()
 *                     |     |     |          [ 1]      0  deadman()
 *                     |     +---- | -------> [ 2]      3  clock_highres_fire()
 *                     +---------- | -------> [ 3]      1  clock_highres_fire()
 *                                 +--------> [ 4]      1  clock_highres_fire()
 *                                            [ 5]      -  (free)
 *                                            [ 6]      -  (free)
 *                                            [ 7]      -  (free)
 *
 *  In particular, note that clock()'s cy_pend is 1 but that it is _not_ in
 *  this producer/consumer buffer; it would be enqueued in the CY_LOCK_LEVEL
 *  producer/consumer buffer.
 *
 *  Locking
 *
 *  Traditionally, access to per-CPU data structures shared between
 *  interrupt levels is serialized by manipulating programmable interrupt
 *  level:  readers and writers are required to raise their interrupt level
 *  to that of the highest level writer.
 *
 *  For the producer/consumer buffers (shared between cyclic_fire()/
 *  cyclic_expire() executing at CY_HIGH_LEVEL and cyclic_softint() executing
 *  at one of CY_LOCK_LEVEL or CY_LOW_LEVEL), forcing cyclic_softint() to raise
 *  programmable interrupt level is undesirable:  aside from the additional
 *  latency incurred by manipulating interrupt level in the hot cy_pend
 *  processing path, this would create the potential for soft level cy_pend
 *  processing to delay CY_HIGH_LEVEL firing and expiry processing.
 *  CY_LOCK/LOW_LEVEL cyclics could thereby induce jitter in CY_HIGH_LEVEL
 *  cyclics.
 *
 *  To minimize jitter, then, we would like the cyclic_fire()/cyclic_expire()
 *  and cyclic_softint() code paths to be lock-free.
 *
 *  For cyclic_fire()/cyclic_expire(), lock-free execution is straightforward:
 *  because these routines execute at a higher interrupt level than
 *  cyclic_softint(), their actions on the producer/consumer buffer appear
 *  atomic.  In particular, the increment of cy_pend appears to occur
 *  atomically with the increment of cypc_prodndx.
 *
 *  For cyclic_softint(), however, lock-free execution requires more delicacy.
 *  When cyclic_softint() discovers a cyclic in the producer/consumer buffer,
 *  it calls the cyclic's handler and attempts to atomically decrement the
 *  cy_pend count with a compare&swap operation.
 *
 *  If the compare&swap operation succeeds, cyclic_softint() behaves
 *  conditionally based on the value it atomically wrote to cy_pend:
 *
 *     - If the cy_pend was decremented to 0, the cyclic has been consumed;
 *       cyclic_softint() increments the cypc_consndx and checks for more
 *       enqueued work.
 *
 *     - If the count was decremented to a non-zero value, there is more work
 *       to be done on the cyclic; cyclic_softint() calls the cyclic handler
 *       and repeats the atomic decrement process.
 *
 *  If the compare&swap operation fails, cyclic_softint() knows that
 *  cyclic_expire() has intervened and bumped the cy_pend count (resizes
 *  and removals complicate this, however -- see the sections on their
 *  operation, below).  cyclic_softint() thus reloads cy_pend, and re-attempts
 *  the atomic decrement.
 *
 *  Recall that we bound the size of the producer/consumer buffer by
 *  having cyclic_expire() only enqueue the specified cyclic if its
 *  cy_pend count is zero; this assures that each cyclic is enqueued at
 *  most once.  This leads to a critical constraint on cyclic_softint(),
 *  however:  after the compare&swap operation which successfully decrements
 *  cy_pend to zero, cyclic_softint() must _not_ re-examine the consumed
 *  cyclic.  In part to obey this constraint, cyclic_softint() calls the
 *  cyclic handler before decrementing cy_pend.
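 *
 *  The consumption loop in cyclic_softint() thus looks roughly like this
 *  (a sketch only; as described under "Resizing" and "Removals" below, the
 *  real loop must also cope with a cy_pend count that has been zeroed out
 *  from underneath it):
 *
 *      uint32_t pend = cyclic->cy_pend, old;
 *
 *      for (;;) {
 *          (*handler)(arg);           <-- call the handler _first_
 *          while ((old = cas32(&cyclic->cy_pend, pend, pend - 1)) != pend)
 *              pend = old;            <-- expire() intervened; re-attempt
 *          if (--pend == 0)
 *              break;                 <-- consumed; bump cypc_consndx
 *      }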
 *
 *  Resizing
 *
 *  All of the discussion thus far has assumed a static number of cyclics.
 *  Obviously, static limitations are not practical; we need the capacity
 *  to resize our data structures dynamically.
 *
 *  We resize our data structures lazily, and only on a per-CPU basis.
 *  The size of the data structures always doubles and never shrinks.  We
 *  serialize adds (and thus resizes) on cpu_lock; we never need to deal
 *  with concurrent resizes.  Resizes should be rare; they may induce jitter
 *  on the CPU being resized, but should not affect cyclic operation on other
 *  CPUs.  Pending cyclics may not be dropped during a resize operation.
 *
 *  Three key cyc_cpu data structures need to be resized:  the cyclics array,
 *  the heap array and the producer/consumer buffers.  Resizing the first two
 *  is relatively straightforward:
 *
 *    1.  The new, larger arrays are allocated in cyclic_expand() (called
 *        from cyclic_add()).
 *    2.  cyclic_expand() cross calls cyclic_expand_xcall() on the CPU
 *        undergoing the resize.
 *    3.  cyclic_expand_xcall() raises interrupt level to CY_HIGH_LEVEL.
 *    4.  The contents of the old arrays are copied into the new arrays.
 *    5.  The old cyclics array is bzero()'d.
 *    6.  The pointers are updated.
 *
 *  The producer/consumer buffer is dicier:  cyclic_expand_xcall() may have
 *  interrupted cyclic_softint() in the middle of consumption.  To resize the
 *  producer/consumer buffer, we implement up to two buffers per soft interrupt
 *  level:  a hard buffer (the buffer being produced into by cyclic_expire())
 *  and a soft buffer (the buffer from which cyclic_softint() is consuming).
 *  During normal operation, the hard buffer and soft buffer point to the
 *  same underlying producer/consumer buffer.
 *
 *  During a resize, however, cyclic_expand_xcall() changes the hard buffer
 *  to point to the new, larger producer/consumer buffer; all future
 *  cyclic_expire()'s will produce into the new buffer.  cyclic_expand_xcall()
 *  then posts a CY_LOCK_LEVEL soft interrupt, landing in cyclic_softint().
 *
 *  As under normal operation, cyclic_softint() will consume cyclics from
 *  its soft buffer.  After the soft buffer is drained, however,
 *  cyclic_softint() will see that the hard buffer has changed.  At that time,
 *  cyclic_softint() will change its soft buffer to point to the hard buffer,
 *  and repeat the producer/consumer buffer draining procedure.
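 *
 *  A sketch of that drain procedure (cys_hard appears in the code below;
 *  cys_soft is assumed here to be its soft-buffer counterpart within the
 *  cyc_softbuf structure):
 *
 *      again:
 *          drain softbuf->cys_buf[softbuf->cys_soft];
 *          if (softbuf->cys_soft != softbuf->cys_hard) {
 *              softbuf->cys_soft = softbuf->cys_hard;  <-- resize observed
 *              goto again;
 *          }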
 *
 *  After the new buffer is drained, cyclic_softint() will determine if both
 *  soft levels have seen their new producer/consumer buffer.  If both have,
 *  cyclic_softint() will post on the semaphore cyp_modify_wait.  If not, a
 *  soft interrupt will be generated for the remaining level.
 *
 *  cyclic_expand() blocks on the cyp_modify_wait semaphore (a semaphore is
 *  used instead of a condition variable because of the race between the
 *  sema_p() in cyclic_expand() and the sema_v() in cyclic_softint()).  This
 *  allows cyclic_expand() to know when the resize operation is complete;
 *  all of the old buffers (the heap, the cyclics array and the producer/
 *  consumer buffers) can be freed.
 *
 *  A final caveat on resizing:  we described step (5) in the
 *  cyclic_expand_xcall() procedure without providing any motivation.  This
 *  step addresses the problem of a cyclic_softint() attempting to decrement
 *  a cy_pend count while interrupted by a cyclic_expand_xcall().  Because
 *  cyclic_softint() has already called the handler by the time cy_pend is
 *  decremented, we want to assure that it doesn't decrement a cy_pend
 *  count in the old cyclics array.  By zeroing the old cyclics array in
 *  cyclic_expand_xcall(), we are zeroing out every cy_pend count; when
 *  cyclic_softint() attempts to compare&swap on the cy_pend count, it will
 *  fail and recognize that the count has been zeroed.  cyclic_softint() will
 *  update its stale copy of the cyp_cyclics pointer, re-read the cy_pend
 *  count from the new cyclics array, and re-attempt the compare&swap.
 *
 *  Removals
 *
 *  Cyclic removals should be rare.  To simplify the implementation (and to
 *  allow optimization for the cyclic_fire()/cyclic_expire()/cyclic_softint()
 *  path), we force removals and adds to serialize on cpu_lock.
 *
 *  Cyclic removal is complicated by a guarantee made to the consumer of
 *  the cyclic subsystem:  after cyclic_remove() returns, the cyclic handler
 *  has returned and will never again be called.
 *
 *  Here is the procedure for cyclic removal:
 *
 *    1.  cyclic_remove() calls cyclic_remove_xcall() on the CPU undergoing
 *        the removal.
 *    2.  cyclic_remove_xcall() raises interrupt level to CY_HIGH_LEVEL.
 *    3.  The current expiration time for the removed cyclic is recorded.
 *    4.  If the cy_pend count on the removed cyclic is non-zero, it
 *        is copied into cyp_rpend and subsequently zeroed.
 *    5.  The cyclic is removed from the heap.
 *    6.  If the root of the heap has changed, the backend is reprogrammed.
 *    7.  If the cy_pend count was non-zero, cyclic_remove() blocks on the
 *        cyp_modify_wait semaphore.
 *
 *  The motivation for step (3) is explained in "Juggling", below.
 *
 *  The cy_pend count is decremented in cyclic_softint() after the cyclic
 *  handler returns.  Thus, if we find a cy_pend count of zero in step
 *  (4), we know that cyclic_remove() doesn't need to block.
 *
 *  If the cy_pend count is non-zero, however, we must block in cyclic_remove()
 *  until cyclic_softint() has finished calling the cyclic handler.  To let
 *  cyclic_softint() know that this cyclic has been removed, we zero the
 *  cy_pend count.  This will cause cyclic_softint()'s compare&swap to fail.
 *  When cyclic_softint() sees the zero cy_pend count, it knows that it's been
 *  caught during a resize (see "Resizing", above) or that the cyclic has been
 *  removed.  In the latter case, it calls cyclic_remove_pend() to call the
 *  cyclic handler cyp_rpend - 1 times, and posts on cyp_modify_wait.
 *
 *  Juggling
 *
 *  At first glance, cyclic juggling seems to be a difficult problem.  The
 *  subsystem must guarantee that a cyclic doesn't execute simultaneously on
 *  different CPUs, while also assuring that a cyclic fires exactly once
 *  per interval.  We solve this problem by leveraging a property of the
 *  platform:  gethrtime() is required to increase in lock-step across
 *  multiple CPUs.  Therefore, to juggle a cyclic, we remove it from its
 *  CPU, recording its expiration time in the remove cross call (step (3)
 *  in "Removals", above).  We then add the cyclic to the new CPU, explicitly
 *  setting its expiration time to the time recorded in the removal.  This
 *  leverages the existing cyclic expiry processing, which will compensate
 *  for any time lost while juggling.
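 *
 *  In pseudocode, juggling a cyclic from one CPU to another is just (a
 *  sketch of the procedure described above):
 *
 *      exp = cyclic's cy_expire;      <-- recorded in step (3), "Removals"
 *      remove the cyclic from the source CPU;
 *      add the cyclic to the destination CPU, with cy_expire = exp;
 *
 *  Because gethrtime() is consistent across CPUs, the destination CPU will
 *  honor the original expiration time, and the expiry processing in
 *  cyclic_fire() will compensate for any intervals missed in transit.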
 *
 */
#include <sys/cyclic_impl.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>

#ifdef CYCLIC_TRACE

/*
 * cyc_trace_enabled is for the benefit of kernel debuggers.
 */
int cyc_trace_enabled = 1;
static cyc_tracebuf_t cyc_ptrace;
static cyc_coverage_t cyc_coverage[CY_NCOVERAGE];

/*
 * Seen this anywhere?  (It's the classic ELF symbol hash function.)
 */
static uint_t
cyclic_coverage_hash(char *p)
{
	unsigned int g;
	uint_t hval;

	hval = 0;
	while (*p) {
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return (hval);
}

static void
cyclic_coverage(char *why, int level, uint64_t arg0, uint64_t arg1)
{
	uint_t ndx, orig;

	for (ndx = orig = cyclic_coverage_hash(why) % CY_NCOVERAGE; ; ) {
		if (cyc_coverage[ndx].cyv_why == why)
			break;

		if (cyc_coverage[ndx].cyv_why != NULL ||
		    casptr(&cyc_coverage[ndx].cyv_why, NULL, why) != NULL) {

			if (++ndx == CY_NCOVERAGE)
				ndx = 0;

			if (ndx == orig)
				panic("too many cyclic coverage points");
			continue;
		}

		/*
		 * If we're here, we have successfully swung our guy into
		 * the position at "ndx".
		 */
		break;
	}

	if (level == CY_PASSIVE_LEVEL)
		cyc_coverage[ndx].cyv_passive_count++;
	else
		cyc_coverage[ndx].cyv_count[level]++;

	cyc_coverage[ndx].cyv_arg0 = arg0;
	cyc_coverage[ndx].cyv_arg1 = arg1;
}

#define	CYC_TRACE(cpu, level, why, arg0, arg1) \
	CYC_TRACE_IMPL(&cpu->cyp_trace[level], level, why, arg0, arg1)

#define	CYC_PTRACE(why, arg0, arg1) \
	CYC_TRACE_IMPL(&cyc_ptrace, CY_PASSIVE_LEVEL, why, arg0, arg1)

#define	CYC_TRACE_IMPL(buf, level, why, a0, a1) { \
	if (panicstr == NULL) { \
		int _ndx = (buf)->cyt_ndx; \
		cyc_tracerec_t *_rec = &(buf)->cyt_buf[_ndx]; \
		(buf)->cyt_ndx = (++_ndx == CY_NTRACEREC) ? 0 : _ndx; \
		_rec->cyt_tstamp = gethrtime_unscaled(); \
		_rec->cyt_why = (why); \
		_rec->cyt_arg0 = (uint64_t)(uintptr_t)(a0); \
		_rec->cyt_arg1 = (uint64_t)(uintptr_t)(a1); \
		cyclic_coverage(why, level,	\
		    (uint64_t)(uintptr_t)(a0), (uint64_t)(uintptr_t)(a1)); \
	} \
}

#else

static int cyc_trace_enabled = 0;

#define	CYC_TRACE(cpu, level, why, arg0, arg1)
#define	CYC_PTRACE(why, arg0, arg1)

#endif

#define	CYC_TRACE0(cpu, level, why) CYC_TRACE(cpu, level, why, 0, 0)
#define	CYC_TRACE1(cpu, level, why, arg0) CYC_TRACE(cpu, level, why, arg0, 0)

#define	CYC_PTRACE0(why) CYC_PTRACE(why, 0, 0)
#define	CYC_PTRACE1(why, arg0) CYC_PTRACE(why, arg0, 0)

static kmem_cache_t *cyclic_id_cache;
static cyc_id_t *cyclic_id_head;
static hrtime_t cyclic_resolution;
static cyc_backend_t cyclic_backend;

/*
 * Returns 1 if the upheap propagated to the root, 0 if it did not.  This
 * allows the caller to reprogram the backend only when the root has been
 * modified.
 */
static int
cyclic_upheap(cyc_cpu_t *cpu, cyc_index_t ndx)
{
	cyclic_t *cyclics;
	cyc_index_t *heap;
	cyc_index_t heap_parent, heap_current = ndx;
	cyc_index_t parent, current;

	if (heap_current == 0)
		return (1);

	heap = cpu->cyp_heap;
	cyclics = cpu->cyp_cyclics;
	heap_parent = CYC_HEAP_PARENT(heap_current);

	for (;;) {
		current = heap[heap_current];
		parent = heap[heap_parent];

		/*
		 * We have an expiration time no earlier than our parent's;
		 * we're done.
		 */
		if (cyclics[current].cy_expire >= cyclics[parent].cy_expire)
			return (0);

		/*
		 * We need to swap with our parent, and continue up the heap.
		 */
		heap[heap_parent] = current;
		heap[heap_current] = parent;

		/*
		 * If we just reached the root, we're done.
		 */
		if (heap_parent == 0)
			return (1);

		heap_current = heap_parent;
		heap_parent = CYC_HEAP_PARENT(heap_current);
	}
}

static void
cyclic_downheap(cyc_cpu_t *cpu, cyc_index_t ndx)
{
	cyclic_t *cyclics = cpu->cyp_cyclics;
	cyc_index_t *heap = cpu->cyp_heap;

	cyc_index_t heap_left, heap_right, heap_me = ndx;
	cyc_index_t left, right, me;
	cyc_index_t nelems = cpu->cyp_nelems;

	for (;;) {
		/*
		 * If we don't have a left child (i.e., we're a leaf), we're
		 * done.
		 */
		if ((heap_left = CYC_HEAP_LEFT(heap_me)) >= nelems)
			return;

		left = heap[heap_left];
		me = heap[heap_me];

		heap_right = CYC_HEAP_RIGHT(heap_me);

		/*
		 * Even if we don't have a right child, we still need to compare
		 * our expiration time against that of our left child.
		 */
		if (heap_right >= nelems)
			goto comp_left;

		right = heap[heap_right];

		/*
		 * We have both a left and a right child.  We need to compare
		 * the expiration times of the children to determine which
		 * expires earlier.
		 */
		if (cyclics[right].cy_expire < cyclics[left].cy_expire) {
			/*
			 * Our right child is the earlier of our children.
			 * We'll now compare our expiration time to its; if
			 * ours is the earlier, we're done.
			 */
			if (cyclics[me].cy_expire <= cyclics[right].cy_expire)
				return;

			/*
			 * Our right child expires earlier than we do; swap
			 * with our right child, and descend right.
			 */
			heap[heap_right] = me;
			heap[heap_me] = right;
			heap_me = heap_right;
			continue;
		}

comp_left:
		/*
		 * Our left child is the earlier of our children (or we have
		 * no right child).  We'll now compare our expiration time
		 * to its; if ours is the earlier, we're done.
		 */
		if (cyclics[me].cy_expire <= cyclics[left].cy_expire)
			return;

		/*
		 * Our left child expires earlier than we do; swap with our
		 * left child, and descend left.
		 */
		heap[heap_left] = me;
		heap[heap_me] = left;
		heap_me = heap_left;
	}
}

static void
cyclic_expire(cyc_cpu_t *cpu, cyc_index_t ndx, cyclic_t *cyclic)
{
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_level_t level = cyclic->cy_level;

	/*
	 * If this is a CY_HIGH_LEVEL cyclic, just call the handler; we don't
	 * need to worry about the pend count for CY_HIGH_LEVEL cyclics.
	 */
	if (level == CY_HIGH_LEVEL) {
		cyc_func_t handler = cyclic->cy_handler;
		void *arg = cyclic->cy_arg;

		CYC_TRACE(cpu, CY_HIGH_LEVEL, "handler-in", handler, arg);
		(*handler)(arg);
		CYC_TRACE(cpu, CY_HIGH_LEVEL, "handler-out", handler, arg);

		return;
	}

	/*
	 * We're at CY_HIGH_LEVEL; this modification to cy_pend need not
	 * be atomic (the high interrupt level assures that it will appear
	 * atomic to any softint currently running).
	 */
	if (cyclic->cy_pend++ == 0) {
		cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[level];
		cyc_pcbuffer_t *pc = &softbuf->cys_buf[softbuf->cys_hard];

		/*
		 * We need to enqueue this cyclic in the soft buffer.
		 */
		CYC_TRACE(cpu, CY_HIGH_LEVEL, "expire-enq", cyclic,
		    pc->cypc_prodndx);
		pc->cypc_buf[pc->cypc_prodndx++ & pc->cypc_sizemask] = ndx;

		ASSERT(pc->cypc_prodndx != pc->cypc_consndx);
	} else {
		/*
		 * If the pend count is zero after we incremented it, then
		 * we've wrapped (i.e. we had a cy_pend count of over four
		 * billion).  In this case, we clamp the pend count at
		 * UINT32_MAX.  Yes, cyclics can be lost in this case.
		 */
		if (cyclic->cy_pend == 0) {
			CYC_TRACE1(cpu, CY_HIGH_LEVEL, "expire-wrap", cyclic);
			cyclic->cy_pend = UINT32_MAX;
		}

		CYC_TRACE(cpu, CY_HIGH_LEVEL, "expire-bump", cyclic, 0);
	}

	be->cyb_softint(be->cyb_arg, cyclic->cy_level);
}

/*
 *  cyclic_fire(cpu_t *)
 *
 *  Overview
 *
 *    cyclic_fire() is the cyclic subsystem's CY_HIGH_LEVEL interrupt handler,
 *    called by the cyclic backend.
 *
 *  Arguments and notes
 *
 *    The only argument is the CPU on which the interrupt is executing;
 *    backends must call into cyclic_fire() on the specified CPU.
 *
 *    cyclic_fire() may be called spuriously without ill effect.  Optimal
 *    backends will call into cyclic_fire() at or shortly after the time
 *    requested via cyb_reprogram().  However, calling cyclic_fire()
 *    arbitrarily late will only manifest latency bubbles; the correctness
 *    of the cyclic subsystem does not rely on the timeliness of the backend.
 *
 *    cyclic_fire() is wait-free; it will not block or spin.
 *
 *  Return values
 *
 *    None.
 *
 *  Caller's context
 *
 *    cyclic_fire() must be called from CY_HIGH_LEVEL interrupt context.
 */
875*0Sstevel@tonic-gate void
876*0Sstevel@tonic-gate cyclic_fire(cpu_t *c)
877*0Sstevel@tonic-gate {
878*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = c->cpu_cyclic;
879*0Sstevel@tonic-gate 	cyc_backend_t *be = cpu->cyp_backend;
880*0Sstevel@tonic-gate 	cyc_index_t *heap = cpu->cyp_heap;
881*0Sstevel@tonic-gate 	cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics;
882*0Sstevel@tonic-gate 	void *arg = be->cyb_arg;
883*0Sstevel@tonic-gate 	hrtime_t now = gethrtime();
884*0Sstevel@tonic-gate 	hrtime_t exp;
885*0Sstevel@tonic-gate 
886*0Sstevel@tonic-gate 	CYC_TRACE(cpu, CY_HIGH_LEVEL, "fire", now, 0);
887*0Sstevel@tonic-gate 
888*0Sstevel@tonic-gate 	if (cpu->cyp_nelems == 0) {
889*0Sstevel@tonic-gate 		/*
890*0Sstevel@tonic-gate 		 * This is a spurious fire.  Count it as such, and blow
891*0Sstevel@tonic-gate 		 * out of here.
892*0Sstevel@tonic-gate 		 */
893*0Sstevel@tonic-gate 		CYC_TRACE0(cpu, CY_HIGH_LEVEL, "fire-spurious");
894*0Sstevel@tonic-gate 		return;
895*0Sstevel@tonic-gate 	}
896*0Sstevel@tonic-gate 
897*0Sstevel@tonic-gate 	for (;;) {
898*0Sstevel@tonic-gate 		cyc_index_t ndx = heap[0];
899*0Sstevel@tonic-gate 
900*0Sstevel@tonic-gate 		cyclic = &cyclics[ndx];
901*0Sstevel@tonic-gate 
902*0Sstevel@tonic-gate 		ASSERT(!(cyclic->cy_flags & CYF_FREE));
903*0Sstevel@tonic-gate 
904*0Sstevel@tonic-gate 		CYC_TRACE(cpu, CY_HIGH_LEVEL, "fire-check", cyclic,
905*0Sstevel@tonic-gate 		    cyclic->cy_expire);
906*0Sstevel@tonic-gate 
907*0Sstevel@tonic-gate 		if ((exp = cyclic->cy_expire) > now)
908*0Sstevel@tonic-gate 			break;
909*0Sstevel@tonic-gate 
910*0Sstevel@tonic-gate 		cyclic_expire(cpu, ndx, cyclic);
911*0Sstevel@tonic-gate 
912*0Sstevel@tonic-gate 		/*
913*0Sstevel@tonic-gate 		 * If this cyclic's next expiration would land in the distant
914*0Sstevel@tonic-gate 		 * past, we have one of two situations:
915*0Sstevel@tonic-gate 		 *
916*0Sstevel@tonic-gate 		 *   a)	This is the first firing of a cyclic which had
917*0Sstevel@tonic-gate 		 *	cy_expire set to 0.
918*0Sstevel@tonic-gate 		 *
919*0Sstevel@tonic-gate 		 *   b)	We are tragically late for a cyclic -- most likely
920*0Sstevel@tonic-gate 		 *	due to being in the debugger.
921*0Sstevel@tonic-gate 		 *
922*0Sstevel@tonic-gate 		 * In either case, we set the new expiration time to be the
923*0Sstevel@tonic-gate 		 * next interval boundary.  This assures that the
924*0Sstevel@tonic-gate 		 * expiration time modulo the interval is invariant.
925*0Sstevel@tonic-gate 		 *
926*0Sstevel@tonic-gate 		 * We arbitrarily define "distant" to be one second (one second
927*0Sstevel@tonic-gate 		 * is chosen because it's shorter than any foray to the
928*0Sstevel@tonic-gate 		 * debugger while still being longer than any legitimate
929*0Sstevel@tonic-gate 		 * stretch at CY_HIGH_LEVEL).
930*0Sstevel@tonic-gate 		 */
931*0Sstevel@tonic-gate 		exp += cyclic->cy_interval;
932*0Sstevel@tonic-gate 
933*0Sstevel@tonic-gate 		if (now - exp > NANOSEC) {
934*0Sstevel@tonic-gate 			hrtime_t interval = cyclic->cy_interval;
935*0Sstevel@tonic-gate 
936*0Sstevel@tonic-gate 			CYC_TRACE(cpu, CY_HIGH_LEVEL, exp == interval ?
937*0Sstevel@tonic-gate 			    "fire-first" : "fire-swing", now, exp);
938*0Sstevel@tonic-gate 
939*0Sstevel@tonic-gate 			exp += ((now - exp) / interval + 1) * interval;
940*0Sstevel@tonic-gate 		}
941*0Sstevel@tonic-gate 
942*0Sstevel@tonic-gate 		cyclic->cy_expire = exp;
943*0Sstevel@tonic-gate 		cyclic_downheap(cpu, 0);
944*0Sstevel@tonic-gate 	}
945*0Sstevel@tonic-gate 
946*0Sstevel@tonic-gate 	/*
947*0Sstevel@tonic-gate 	 * Now we have a cyclic in the root slot which isn't in the past;
948*0Sstevel@tonic-gate 	 * reprogram the interrupt source.
949*0Sstevel@tonic-gate 	 */
950*0Sstevel@tonic-gate 	be->cyb_reprogram(arg, exp);
951*0Sstevel@tonic-gate }
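
/*
 * An illustrative sketch of the catch-up arithmetic in cyclic_fire(),
 * above:  when an expiration would land more than a second in the
 * past, it is advanced by a whole number of intervals, leaving
 * (cy_expire % cy_interval) invariant.  For example, with interval =
 * 10 and exp = 3, waking up at now = 47 yields
 * exp += ((47 - 3) / 10 + 1) * 10, i.e. exp = 53 -- still congruent
 * to 3 modulo 10, and strictly in the future.  The example_ name is
 * hypothetical.
 */
#if 0
static hrtime_t
example_catchup(hrtime_t exp, hrtime_t interval, hrtime_t now)
{
	if (now - exp > NANOSEC)
		exp += ((now - exp) / interval + 1) * interval;

	return (exp);
}
#endif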
952*0Sstevel@tonic-gate 
953*0Sstevel@tonic-gate static void
954*0Sstevel@tonic-gate cyclic_remove_pend(cyc_cpu_t *cpu, cyc_level_t level, cyclic_t *cyclic)
955*0Sstevel@tonic-gate {
956*0Sstevel@tonic-gate 	cyc_func_t handler = cyclic->cy_handler;
957*0Sstevel@tonic-gate 	void *arg = cyclic->cy_arg;
958*0Sstevel@tonic-gate 	uint32_t i, rpend = cpu->cyp_rpend - 1;
959*0Sstevel@tonic-gate 
960*0Sstevel@tonic-gate 	ASSERT(cyclic->cy_flags & CYF_FREE);
961*0Sstevel@tonic-gate 	ASSERT(cyclic->cy_pend == 0);
962*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_REMOVING);
963*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_rpend > 0);
964*0Sstevel@tonic-gate 
965*0Sstevel@tonic-gate 	CYC_TRACE(cpu, level, "remove-rpend", cyclic, cpu->cyp_rpend);
966*0Sstevel@tonic-gate 
967*0Sstevel@tonic-gate 	/*
968*0Sstevel@tonic-gate 	 * Note that we only call the handler cyp_rpend - 1 times; this is
969*0Sstevel@tonic-gate 	 * to account for the handler call in cyclic_softint().
970*0Sstevel@tonic-gate 	 */
971*0Sstevel@tonic-gate 	for (i = 0; i < rpend; i++) {
972*0Sstevel@tonic-gate 		CYC_TRACE(cpu, level, "rpend-in", handler, arg);
973*0Sstevel@tonic-gate 		(*handler)(arg);
974*0Sstevel@tonic-gate 		CYC_TRACE(cpu, level, "rpend-out", handler, arg);
975*0Sstevel@tonic-gate 	}
976*0Sstevel@tonic-gate 
977*0Sstevel@tonic-gate 	/*
978*0Sstevel@tonic-gate 	 * We can now let the remove operation complete.
979*0Sstevel@tonic-gate 	 */
980*0Sstevel@tonic-gate 	sema_v(&cpu->cyp_modify_wait);
981*0Sstevel@tonic-gate }
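
/*
 * An illustrative restatement of the rpend accounting above:  a
 * cyclic removed with cy_pend == N (N > 0) must still have its
 * handler run N times in total.  cyclic_softint() ran it once before
 * noticing that pend had been zeroed, so cyclic_remove_pend()
 * supplies only the remaining N - 1 calls before posting the waiting
 * remover.  The example_ name is hypothetical.
 */
#if 0
static void
example_drain_rpend(uint32_t rpend, cyc_func_t handler, void *arg)
{
	uint32_t i;

	for (i = 0; i < rpend - 1; i++)	/* one call already happened */
		(*handler)(arg);
}
#endif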
982*0Sstevel@tonic-gate 
983*0Sstevel@tonic-gate /*
984*0Sstevel@tonic-gate  *  cyclic_softint(cpu_t *cpu, cyc_level_t level)
985*0Sstevel@tonic-gate  *
986*0Sstevel@tonic-gate  *  Overview
987*0Sstevel@tonic-gate  *
988*0Sstevel@tonic-gate  *    cyclic_softint() is the cyclic subsystem's CY_LOCK_LEVEL and CY_LOW_LEVEL
989*0Sstevel@tonic-gate  *    soft interrupt handler.  Called by the cyclic backend.
990*0Sstevel@tonic-gate  *
991*0Sstevel@tonic-gate  *  Arguments and notes
992*0Sstevel@tonic-gate  *
993*0Sstevel@tonic-gate  *    The first argument to cyclic_softint() is the CPU on which the interrupt
994*0Sstevel@tonic-gate  *    is executing; backends must call into cyclic_softint() on the specified
995*0Sstevel@tonic-gate  *    CPU.  The second argument is the level of the soft interrupt; it must
996*0Sstevel@tonic-gate  *    be one of CY_LOCK_LEVEL or CY_LOW_LEVEL.
997*0Sstevel@tonic-gate  *
998*0Sstevel@tonic-gate  *    cyclic_softint() will call the handlers for cyclics pending at the
999*0Sstevel@tonic-gate  *    specified level.  cyclic_softint() will not return until all pending
1000*0Sstevel@tonic-gate  *    cyclics at the specified level have been dealt with; intervening
1001*0Sstevel@tonic-gate  *    CY_HIGH_LEVEL interrupts which enqueue cyclics at the specified level
1002*0Sstevel@tonic-gate  *    may therefore prolong cyclic_softint().
1003*0Sstevel@tonic-gate  *
1004*0Sstevel@tonic-gate  *    cyclic_softint() never disables interrupts, and, if neither a
1005*0Sstevel@tonic-gate  *    cyclic_add() nor a cyclic_remove() is pending on the specified CPU, is
1006*0Sstevel@tonic-gate  *    lock-free.  This assures that in the common case, cyclic_softint()
1007*0Sstevel@tonic-gate  *    completes without blocking, and never starves cyclic_fire().  If either
1008*0Sstevel@tonic-gate  *    cyclic_add() or cyclic_remove() is pending, cyclic_softint() may grab
1009*0Sstevel@tonic-gate  *    a dispatcher lock.
1010*0Sstevel@tonic-gate  *
1011*0Sstevel@tonic-gate  *    While cyclic_softint() is designed for bounded latency, it is obviously
1012*0Sstevel@tonic-gate  *    at the mercy of its cyclic handlers.  Because cyclic handlers may block
1013*0Sstevel@tonic-gate  *    arbitrarily, callers of cyclic_softint() should not rely upon
1014*0Sstevel@tonic-gate  *    deterministic completion.
1015*0Sstevel@tonic-gate  *
1016*0Sstevel@tonic-gate  *    cyclic_softint() may be called spuriously without ill effect.
1017*0Sstevel@tonic-gate  *
1018*0Sstevel@tonic-gate  *  Return value
1019*0Sstevel@tonic-gate  *
1020*0Sstevel@tonic-gate  *    None.
1021*0Sstevel@tonic-gate  *
1022*0Sstevel@tonic-gate  *  Caller's context
1023*0Sstevel@tonic-gate  *
1024*0Sstevel@tonic-gate  *    The caller must be executing in soft interrupt context at either
1025*0Sstevel@tonic-gate  *    CY_LOCK_LEVEL or CY_LOW_LEVEL.  The level passed to cyclic_softint()
1026*0Sstevel@tonic-gate  *    must match the level at which it is executing.  On optimal backends,
1027*0Sstevel@tonic-gate  *    the caller will hold no locks.  In any case, the caller may not hold
1028*0Sstevel@tonic-gate  *    cpu_lock or any lock acquired by any cyclic handler or held across
1029*0Sstevel@tonic-gate  *    any of cyclic_add(), cyclic_remove(), cyclic_bind() or cyclic_juggle().
1030*0Sstevel@tonic-gate  */
1031*0Sstevel@tonic-gate void
1032*0Sstevel@tonic-gate cyclic_softint(cpu_t *c, cyc_level_t level)
1033*0Sstevel@tonic-gate {
1034*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = c->cpu_cyclic;
1035*0Sstevel@tonic-gate 	cyc_softbuf_t *softbuf;
1036*0Sstevel@tonic-gate 	int soft, *buf, consndx, resized = 0, intr_resized = 0;
1037*0Sstevel@tonic-gate 	cyc_pcbuffer_t *pc;
1038*0Sstevel@tonic-gate 	cyclic_t *cyclics = cpu->cyp_cyclics;
1039*0Sstevel@tonic-gate 	int sizemask;
1040*0Sstevel@tonic-gate 
1041*0Sstevel@tonic-gate 	CYC_TRACE(cpu, level, "softint", cyclics, 0);
1042*0Sstevel@tonic-gate 
1043*0Sstevel@tonic-gate 	ASSERT(level < CY_LOW_LEVEL + CY_SOFT_LEVELS);
1044*0Sstevel@tonic-gate 
1045*0Sstevel@tonic-gate 	softbuf = &cpu->cyp_softbuf[level];
1046*0Sstevel@tonic-gate top:
1047*0Sstevel@tonic-gate 	soft = softbuf->cys_soft;
1048*0Sstevel@tonic-gate 	ASSERT(soft == 0 || soft == 1);
1049*0Sstevel@tonic-gate 
1050*0Sstevel@tonic-gate 	pc = &softbuf->cys_buf[soft];
1051*0Sstevel@tonic-gate 	buf = pc->cypc_buf;
1052*0Sstevel@tonic-gate 	consndx = pc->cypc_consndx;
1053*0Sstevel@tonic-gate 	sizemask = pc->cypc_sizemask;
1054*0Sstevel@tonic-gate 
1055*0Sstevel@tonic-gate 	CYC_TRACE(cpu, level, "softint-top", cyclics, pc);
1056*0Sstevel@tonic-gate 
1057*0Sstevel@tonic-gate 	while (consndx != pc->cypc_prodndx) {
1058*0Sstevel@tonic-gate 		int pend, npend, opend;
1059*0Sstevel@tonic-gate 		int consmasked = consndx & sizemask;
1060*0Sstevel@tonic-gate 		cyclic_t *cyclic = &cyclics[buf[consmasked]];
1061*0Sstevel@tonic-gate 		cyc_func_t handler = cyclic->cy_handler;
1062*0Sstevel@tonic-gate 		void *arg = cyclic->cy_arg;
1063*0Sstevel@tonic-gate 
1064*0Sstevel@tonic-gate 		ASSERT(buf[consmasked] < cpu->cyp_size);
1065*0Sstevel@tonic-gate 		CYC_TRACE(cpu, level, "consuming", consndx, cyclic);
1066*0Sstevel@tonic-gate 
1067*0Sstevel@tonic-gate 		/*
1068*0Sstevel@tonic-gate 		 * We have found this cyclic in the pcbuffer.  We know that
1069*0Sstevel@tonic-gate 		 * one of the following is true:
1070*0Sstevel@tonic-gate 		 *
1071*0Sstevel@tonic-gate 		 *  (a)	The pend is non-zero.  We need to execute the handler
1072*0Sstevel@tonic-gate 		 *	at least once.
1073*0Sstevel@tonic-gate 		 *
1074*0Sstevel@tonic-gate 		 *  (b)	The pend _was_ non-zero, but it's now zero due to a
1075*0Sstevel@tonic-gate 		 *	resize.  We will call the handler once, see that we
1076*0Sstevel@tonic-gate 		 *	are in this case, and read the new cyclics buffer
1077*0Sstevel@tonic-gate 		 *	(and hence the old non-zero pend).
1078*0Sstevel@tonic-gate 		 *
1079*0Sstevel@tonic-gate 		 *  (c)	The pend _was_ non-zero, but it's now zero due to a
1080*0Sstevel@tonic-gate 		 *	removal.  We will call the handler once, see that we
1081*0Sstevel@tonic-gate 		 *	are in this case, and call into cyclic_remove_pend()
1082*0Sstevel@tonic-gate 		 *	to call the cyclic rpend times.  We will take into
1083*0Sstevel@tonic-gate 		 *	account that we have already called the handler once.
1084*0Sstevel@tonic-gate 		 *
1085*0Sstevel@tonic-gate 		 * Point is:  it's safe to call the handler without first
1086*0Sstevel@tonic-gate 		 * checking the pend.
1087*0Sstevel@tonic-gate 		 */
1088*0Sstevel@tonic-gate 		do {
1089*0Sstevel@tonic-gate 			CYC_TRACE(cpu, level, "handler-in", handler, arg);
1090*0Sstevel@tonic-gate 			(*handler)(arg);
1091*0Sstevel@tonic-gate 			CYC_TRACE(cpu, level, "handler-out", handler, arg);
1092*0Sstevel@tonic-gate reread:
1093*0Sstevel@tonic-gate 			pend = cyclic->cy_pend;
1094*0Sstevel@tonic-gate 			npend = pend - 1;
1095*0Sstevel@tonic-gate 
1096*0Sstevel@tonic-gate 			if (pend == 0) {
1097*0Sstevel@tonic-gate 				if (cpu->cyp_state == CYS_REMOVING) {
1098*0Sstevel@tonic-gate 					/*
1099*0Sstevel@tonic-gate 					 * This cyclic has been removed while
1100*0Sstevel@tonic-gate 					 * it had a non-zero pend count (we
1101*0Sstevel@tonic-gate 					 * know it was non-zero because we
1102*0Sstevel@tonic-gate 					 * found this cyclic in the pcbuffer).
1103*0Sstevel@tonic-gate 					 * There must be a non-zero rpend for
1104*0Sstevel@tonic-gate 					 * this CPU, and there must be a remove
1105*0Sstevel@tonic-gate 					 * operation blocking; we'll call into
1106*0Sstevel@tonic-gate 					 * cyclic_remove_pend() to clean this
1107*0Sstevel@tonic-gate 					 * up, and break out of the pend loop.
1108*0Sstevel@tonic-gate 					 */
1109*0Sstevel@tonic-gate 					cyclic_remove_pend(cpu, level, cyclic);
1110*0Sstevel@tonic-gate 					break;
1111*0Sstevel@tonic-gate 				}
1112*0Sstevel@tonic-gate 
1113*0Sstevel@tonic-gate 				/*
1114*0Sstevel@tonic-gate 				 * We must have had a resize interrupt us.
1115*0Sstevel@tonic-gate 				 */
1116*0Sstevel@tonic-gate 				CYC_TRACE(cpu, level, "resize-int", cyclics, 0);
1117*0Sstevel@tonic-gate 				ASSERT(cpu->cyp_state == CYS_EXPANDING);
1118*0Sstevel@tonic-gate 				ASSERT(cyclics != cpu->cyp_cyclics);
1119*0Sstevel@tonic-gate 				ASSERT(resized == 0);
1120*0Sstevel@tonic-gate 				ASSERT(intr_resized == 0);
1121*0Sstevel@tonic-gate 				intr_resized = 1;
1122*0Sstevel@tonic-gate 				cyclics = cpu->cyp_cyclics;
1123*0Sstevel@tonic-gate 				cyclic = &cyclics[buf[consmasked]];
1124*0Sstevel@tonic-gate 				ASSERT(cyclic->cy_handler == handler);
1125*0Sstevel@tonic-gate 				ASSERT(cyclic->cy_arg == arg);
1126*0Sstevel@tonic-gate 				goto reread;
1127*0Sstevel@tonic-gate 			}
1128*0Sstevel@tonic-gate 
1129*0Sstevel@tonic-gate 			if ((opend =
1130*0Sstevel@tonic-gate 			    cas32(&cyclic->cy_pend, pend, npend)) != pend) {
1131*0Sstevel@tonic-gate 				/*
1132*0Sstevel@tonic-gate 				 * Our cas32 can fail for one of several
1133*0Sstevel@tonic-gate 				 * reasons:
1134*0Sstevel@tonic-gate 				 *
1135*0Sstevel@tonic-gate 				 *  (a)	An intervening high-level interrupt bumped up the
1136*0Sstevel@tonic-gate 				 *	pend count on this cyclic.  In this
1137*0Sstevel@tonic-gate 				 *	case, we will see a higher pend.
1138*0Sstevel@tonic-gate 				 *
1139*0Sstevel@tonic-gate 				 *  (b)	The cyclics array has been yanked out
1140*0Sstevel@tonic-gate 				 *	from underneath us by a resize
1141*0Sstevel@tonic-gate 				 *	operation.  In this case, pend is 0 and
1142*0Sstevel@tonic-gate 				 *	cyp_state is CYS_EXPANDING.
1143*0Sstevel@tonic-gate 				 *
1144*0Sstevel@tonic-gate 				 *  (c)	The cyclic has been removed by an
1145*0Sstevel@tonic-gate 				 *	intervening remove-xcall.  In this case,
1146*0Sstevel@tonic-gate 				 *	pend will be 0, the cyp_state will be
1147*0Sstevel@tonic-gate 				 *	CYS_REMOVING, and the cyclic will be
1148*0Sstevel@tonic-gate 				 *	marked CYF_FREE.
1149*0Sstevel@tonic-gate 				 *
1150*0Sstevel@tonic-gate 				 * The assertion below checks that we are
1151*0Sstevel@tonic-gate 				 * in one of the above situations.  The
1152*0Sstevel@tonic-gate 				 * action under all three is to return to
1153*0Sstevel@tonic-gate 				 * the top of the loop.
1154*0Sstevel@tonic-gate 				 */
1155*0Sstevel@tonic-gate 				CYC_TRACE(cpu, level, "cas-fail", opend, pend);
1156*0Sstevel@tonic-gate 				ASSERT(opend > pend || (opend == 0 &&
1157*0Sstevel@tonic-gate 				    ((cyclics != cpu->cyp_cyclics &&
1158*0Sstevel@tonic-gate 				    cpu->cyp_state == CYS_EXPANDING) ||
1159*0Sstevel@tonic-gate 				    (cpu->cyp_state == CYS_REMOVING &&
1160*0Sstevel@tonic-gate 				    (cyclic->cy_flags & CYF_FREE)))));
1161*0Sstevel@tonic-gate 				goto reread;
1162*0Sstevel@tonic-gate 			}
1163*0Sstevel@tonic-gate 
1164*0Sstevel@tonic-gate 			/*
1165*0Sstevel@tonic-gate 			 * Okay, so we've successfully decremented pend.  If
1166*0Sstevel@tonic-gate 			 * we just decremented the pend to 0, we're done.
1168*0Sstevel@tonic-gate 			 */
1169*0Sstevel@tonic-gate 		} while (npend > 0);
1170*0Sstevel@tonic-gate 
1171*0Sstevel@tonic-gate 		pc->cypc_consndx = ++consndx;
1172*0Sstevel@tonic-gate 	}
1173*0Sstevel@tonic-gate 
1174*0Sstevel@tonic-gate 	/*
1175*0Sstevel@tonic-gate 	 * If the high level handler is no longer writing to the same
1176*0Sstevel@tonic-gate 	 * buffer, then we've had a resize.  We need to switch our soft
1177*0Sstevel@tonic-gate 	 * index, and goto top.
1178*0Sstevel@tonic-gate 	 */
1179*0Sstevel@tonic-gate 	if (soft != softbuf->cys_hard) {
1180*0Sstevel@tonic-gate 		/*
1181*0Sstevel@tonic-gate 		 * We can assert that the other buffer has grown by exactly
1182*0Sstevel@tonic-gate 		 * one factor of two.
1183*0Sstevel@tonic-gate 		 */
1184*0Sstevel@tonic-gate 		CYC_TRACE(cpu, level, "buffer-grow", 0, 0);
1185*0Sstevel@tonic-gate 		ASSERT(cpu->cyp_state == CYS_EXPANDING);
1186*0Sstevel@tonic-gate 		ASSERT(softbuf->cys_buf[softbuf->cys_hard].cypc_sizemask ==
1187*0Sstevel@tonic-gate 		    (softbuf->cys_buf[soft].cypc_sizemask << 1) + 1 ||
1188*0Sstevel@tonic-gate 		    softbuf->cys_buf[soft].cypc_sizemask == 0);
1189*0Sstevel@tonic-gate 		ASSERT(softbuf->cys_hard == (softbuf->cys_soft ^ 1));
1190*0Sstevel@tonic-gate 
1191*0Sstevel@tonic-gate 		/*
1192*0Sstevel@tonic-gate 		 * If our cached cyclics pointer doesn't match cyp_cyclics,
1193*0Sstevel@tonic-gate 		 * then we took a resize between our last iteration of the
1194*0Sstevel@tonic-gate 		 * pend loop and the check against softbuf->cys_hard.
1195*0Sstevel@tonic-gate 		 */
1196*0Sstevel@tonic-gate 		if (cpu->cyp_cyclics != cyclics) {
1197*0Sstevel@tonic-gate 			CYC_TRACE1(cpu, level, "resize-int-int", consndx);
1198*0Sstevel@tonic-gate 			cyclics = cpu->cyp_cyclics;
1199*0Sstevel@tonic-gate 		}
1200*0Sstevel@tonic-gate 
1201*0Sstevel@tonic-gate 		softbuf->cys_soft = softbuf->cys_hard;
1202*0Sstevel@tonic-gate 
1203*0Sstevel@tonic-gate 		ASSERT(resized == 0);
1204*0Sstevel@tonic-gate 		resized = 1;
1205*0Sstevel@tonic-gate 		goto top;
1206*0Sstevel@tonic-gate 	}
1207*0Sstevel@tonic-gate 
1208*0Sstevel@tonic-gate 	/*
1209*0Sstevel@tonic-gate 	 * If we were interrupted by a resize operation, then we must have
1210*0Sstevel@tonic-gate 	 * seen the hard index change.
1211*0Sstevel@tonic-gate 	 */
1212*0Sstevel@tonic-gate 	ASSERT(!(intr_resized == 1 && resized == 0));
1213*0Sstevel@tonic-gate 
1214*0Sstevel@tonic-gate 	if (resized) {
1215*0Sstevel@tonic-gate 		uint32_t lev, nlev;
1216*0Sstevel@tonic-gate 
1217*0Sstevel@tonic-gate 		ASSERT(cpu->cyp_state == CYS_EXPANDING);
1218*0Sstevel@tonic-gate 
1219*0Sstevel@tonic-gate 		do {
1220*0Sstevel@tonic-gate 			lev = cpu->cyp_modify_levels;
1221*0Sstevel@tonic-gate 			nlev = lev + 1;
1222*0Sstevel@tonic-gate 		} while (cas32(&cpu->cyp_modify_levels, lev, nlev) != lev);
1223*0Sstevel@tonic-gate 
1224*0Sstevel@tonic-gate 		/*
1225*0Sstevel@tonic-gate 		 * If we are the last soft level to see the modification,
1226*0Sstevel@tonic-gate 		 * post on cyp_modify_wait.  Otherwise (if we're not
1227*0Sstevel@tonic-gate 		 * already at low level), post down to the next soft level.
1228*0Sstevel@tonic-gate 		 */
1229*0Sstevel@tonic-gate 		if (nlev == CY_SOFT_LEVELS) {
1230*0Sstevel@tonic-gate 			CYC_TRACE0(cpu, level, "resize-kick");
1231*0Sstevel@tonic-gate 			sema_v(&cpu->cyp_modify_wait);
1232*0Sstevel@tonic-gate 		} else {
1233*0Sstevel@tonic-gate 			ASSERT(nlev < CY_SOFT_LEVELS);
1234*0Sstevel@tonic-gate 			if (level != CY_LOW_LEVEL) {
1235*0Sstevel@tonic-gate 				cyc_backend_t *be = cpu->cyp_backend;
1236*0Sstevel@tonic-gate 
1237*0Sstevel@tonic-gate 				CYC_TRACE0(cpu, level, "resize-post");
1238*0Sstevel@tonic-gate 				be->cyb_softint(be->cyb_arg, level - 1);
1239*0Sstevel@tonic-gate 			}
1240*0Sstevel@tonic-gate 		}
1241*0Sstevel@tonic-gate 	}
1242*0Sstevel@tonic-gate }
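
/*
 * An illustrative sketch of the lock-free pend consumption performed
 * by cyclic_softint(), above:  the handler runs first, and a
 * compare-and-swap then attempts to decrement cy_pend, retrying if an
 * intervening CY_HIGH_LEVEL interrupt bumped it.  This sketch assumes
 * pend is never zeroed externally; the real loop must additionally
 * cope with resize and remove operations doing exactly that.  The
 * example_ name is hypothetical.
 */
#if 0
static void
example_consume_pend(cyclic_t *cyclic)
{
	uint32_t pend, npend;

	do {
		(*cyclic->cy_handler)(cyclic->cy_arg);

		do {
			pend = cyclic->cy_pend;
			npend = pend - 1;
		} while (cas32(&cyclic->cy_pend, pend, npend) != pend);
	} while (npend > 0);
}
#endif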
1243*0Sstevel@tonic-gate 
1244*0Sstevel@tonic-gate static void
1245*0Sstevel@tonic-gate cyclic_expand_xcall(cyc_xcallarg_t *arg)
1246*0Sstevel@tonic-gate {
1247*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = arg->cyx_cpu;
1248*0Sstevel@tonic-gate 	cyc_backend_t *be = cpu->cyp_backend;
1249*0Sstevel@tonic-gate 	cyb_arg_t bar = be->cyb_arg;
1250*0Sstevel@tonic-gate 	cyc_cookie_t cookie;
1251*0Sstevel@tonic-gate 	cyc_index_t new_size = arg->cyx_size, size = cpu->cyp_size, i;
1252*0Sstevel@tonic-gate 	cyc_index_t *new_heap = arg->cyx_heap;
1253*0Sstevel@tonic-gate 	cyclic_t *cyclics = cpu->cyp_cyclics, *new_cyclics = arg->cyx_cyclics;
1254*0Sstevel@tonic-gate 
1255*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_EXPANDING);
1256*0Sstevel@tonic-gate 
1257*0Sstevel@tonic-gate 	/*
1258*0Sstevel@tonic-gate 	 * This is a little dicey.  First, we'll raise our interrupt level
1259*0Sstevel@tonic-gate 	 * to CY_HIGH_LEVEL.  This CPU already has a new heap, cyclic array,
1260*0Sstevel@tonic-gate 	 * etc.; we just need to bcopy them across.  As for the softint
1261*0Sstevel@tonic-gate 	 * buffers, we'll switch the active buffers.  The actual softints will
1262*0Sstevel@tonic-gate 	 * take care of consuming any pending cyclics in the old buffer.
1263*0Sstevel@tonic-gate 	 */
1264*0Sstevel@tonic-gate 	cookie = be->cyb_set_level(bar, CY_HIGH_LEVEL);
1265*0Sstevel@tonic-gate 
1266*0Sstevel@tonic-gate 	CYC_TRACE(cpu, CY_HIGH_LEVEL, "expand", new_size, 0);
1267*0Sstevel@tonic-gate 
1268*0Sstevel@tonic-gate 	/*
1269*0Sstevel@tonic-gate 	 * Assert that the new size is a power of 2.
1270*0Sstevel@tonic-gate 	 */
1271*0Sstevel@tonic-gate 	ASSERT((new_size & (new_size - 1)) == 0);
1272*0Sstevel@tonic-gate 	ASSERT(new_size == (size << 1));
1273*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_heap != NULL && cpu->cyp_cyclics != NULL);
1274*0Sstevel@tonic-gate 
1275*0Sstevel@tonic-gate 	bcopy(cpu->cyp_heap, new_heap, sizeof (cyc_index_t) * size);
1276*0Sstevel@tonic-gate 	bcopy(cyclics, new_cyclics, sizeof (cyclic_t) * size);
1277*0Sstevel@tonic-gate 
1278*0Sstevel@tonic-gate 	/*
1279*0Sstevel@tonic-gate 	 * Now run through the old cyclics array, setting pend to 0.  To
1280*0Sstevel@tonic-gate 	 * softints (which are executing at a lower priority level), the
1281*0Sstevel@tonic-gate 	 * pends dropping to 0 will appear atomic with the cyp_cyclics
1282*0Sstevel@tonic-gate 	 * pointer changing.
1283*0Sstevel@tonic-gate 	 */
1284*0Sstevel@tonic-gate 	for (i = 0; i < size; i++)
1285*0Sstevel@tonic-gate 		cyclics[i].cy_pend = 0;
1286*0Sstevel@tonic-gate 
1287*0Sstevel@tonic-gate 	/*
1288*0Sstevel@tonic-gate 	 * Set up the free list, and set all of the new cyclics to be CYF_FREE.
1289*0Sstevel@tonic-gate 	 */
1290*0Sstevel@tonic-gate 	for (i = size; i < new_size; i++) {
1291*0Sstevel@tonic-gate 		new_heap[i] = i;
1292*0Sstevel@tonic-gate 		new_cyclics[i].cy_flags = CYF_FREE;
1293*0Sstevel@tonic-gate 	}
1294*0Sstevel@tonic-gate 
1295*0Sstevel@tonic-gate 	/*
1296*0Sstevel@tonic-gate 	 * We can go ahead and plow the value of cyp_heap and cyp_cyclics;
1297*0Sstevel@tonic-gate 	 * cyclic_expand() has kept a copy.
1298*0Sstevel@tonic-gate 	 */
1299*0Sstevel@tonic-gate 	cpu->cyp_heap = new_heap;
1300*0Sstevel@tonic-gate 	cpu->cyp_cyclics = new_cyclics;
1301*0Sstevel@tonic-gate 	cpu->cyp_size = new_size;
1302*0Sstevel@tonic-gate 
1303*0Sstevel@tonic-gate 	/*
1304*0Sstevel@tonic-gate 	 * We've switched over the heap and the cyclics array.  Now we need
1305*0Sstevel@tonic-gate 	 * to switch over our active softint buffer pointers.
1306*0Sstevel@tonic-gate 	 */
1307*0Sstevel@tonic-gate 	for (i = CY_LOW_LEVEL; i < CY_LOW_LEVEL + CY_SOFT_LEVELS; i++) {
1308*0Sstevel@tonic-gate 		cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[i];
1309*0Sstevel@tonic-gate 		uchar_t hard = softbuf->cys_hard;
1310*0Sstevel@tonic-gate 
1311*0Sstevel@tonic-gate 		/*
1312*0Sstevel@tonic-gate 		 * Assert that we're not in the middle of a resize operation.
1313*0Sstevel@tonic-gate 		 */
1314*0Sstevel@tonic-gate 		ASSERT(hard == softbuf->cys_soft);
1315*0Sstevel@tonic-gate 		ASSERT(hard == 0 || hard == 1);
1316*0Sstevel@tonic-gate 		ASSERT(softbuf->cys_buf[hard].cypc_buf != NULL);
1317*0Sstevel@tonic-gate 
1318*0Sstevel@tonic-gate 		softbuf->cys_hard = hard ^ 1;
1319*0Sstevel@tonic-gate 
1320*0Sstevel@tonic-gate 		/*
1321*0Sstevel@tonic-gate 		 * The caller (cyclic_expand()) is responsible for setting
1322*0Sstevel@tonic-gate 		 * up the new producer-consumer buffer; assert that it's
1323*0Sstevel@tonic-gate 		 * been done correctly.
1324*0Sstevel@tonic-gate 		 */
1325*0Sstevel@tonic-gate 		ASSERT(softbuf->cys_buf[hard ^ 1].cypc_buf != NULL);
1326*0Sstevel@tonic-gate 		ASSERT(softbuf->cys_buf[hard ^ 1].cypc_prodndx == 0);
1327*0Sstevel@tonic-gate 		ASSERT(softbuf->cys_buf[hard ^ 1].cypc_consndx == 0);
1328*0Sstevel@tonic-gate 	}
1329*0Sstevel@tonic-gate 
1330*0Sstevel@tonic-gate 	/*
1331*0Sstevel@tonic-gate 	 * That's all there is to it; now we just need to post down to
1332*0Sstevel@tonic-gate 	 * get the softint chain going.
1333*0Sstevel@tonic-gate 	 */
1334*0Sstevel@tonic-gate 	be->cyb_softint(bar, CY_HIGH_LEVEL - 1);
1335*0Sstevel@tonic-gate 	be->cyb_restore_level(bar, cookie);
1336*0Sstevel@tonic-gate }
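
/*
 * An illustrative sketch of the double-buffer switch performed above:
 * each softbuf holds two producer/consumer buffers, and cys_hard
 * selects the one the CY_HIGH_LEVEL producer fills.  Flipping the low
 * bit retargets the producer; the softint later notices that cys_soft
 * no longer matches cys_hard, drains the old buffer, and sets
 * cys_soft to follow.  The example_ name is hypothetical.
 */
#if 0
static void
example_switch_buffers(cyc_softbuf_t *softbuf)
{
	uchar_t hard = softbuf->cys_hard;

	ASSERT(hard == 0 || hard == 1);
	ASSERT(hard == softbuf->cys_soft);	/* no switch in progress */

	softbuf->cys_hard = hard ^ 1;	/* producer now fills other buffer */
}
#endif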
1337*0Sstevel@tonic-gate 
1338*0Sstevel@tonic-gate /*
1339*0Sstevel@tonic-gate  * cyclic_expand() will cross call onto the CPU to perform the actual
1340*0Sstevel@tonic-gate  * expand operation.
1341*0Sstevel@tonic-gate  */
1342*0Sstevel@tonic-gate static void
1343*0Sstevel@tonic-gate cyclic_expand(cyc_cpu_t *cpu)
1344*0Sstevel@tonic-gate {
1345*0Sstevel@tonic-gate 	cyc_index_t new_size, old_size;
1346*0Sstevel@tonic-gate 	cyc_index_t *new_heap, *old_heap;
1347*0Sstevel@tonic-gate 	cyclic_t *new_cyclics, *old_cyclics;
1348*0Sstevel@tonic-gate 	cyc_xcallarg_t arg;
1349*0Sstevel@tonic-gate 	cyc_backend_t *be = cpu->cyp_backend;
1350*0Sstevel@tonic-gate 	char old_hard;
1351*0Sstevel@tonic-gate 	int i;
1352*0Sstevel@tonic-gate 
1353*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
1354*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_ONLINE);
1355*0Sstevel@tonic-gate 
1356*0Sstevel@tonic-gate 	cpu->cyp_state = CYS_EXPANDING;
1357*0Sstevel@tonic-gate 
1358*0Sstevel@tonic-gate 	old_heap = cpu->cyp_heap;
1359*0Sstevel@tonic-gate 	old_cyclics = cpu->cyp_cyclics;
1360*0Sstevel@tonic-gate 
1361*0Sstevel@tonic-gate 	if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0) {
1362*0Sstevel@tonic-gate 		new_size = CY_DEFAULT_PERCPU;
1363*0Sstevel@tonic-gate 		ASSERT(old_heap == NULL && old_cyclics == NULL);
1364*0Sstevel@tonic-gate 	}
1365*0Sstevel@tonic-gate 
1366*0Sstevel@tonic-gate 	/*
1367*0Sstevel@tonic-gate 	 * Check that the new_size is a power of 2.
1368*0Sstevel@tonic-gate 	 */
1369*0Sstevel@tonic-gate 	ASSERT(((new_size - 1) & new_size) == 0);
1370*0Sstevel@tonic-gate 
1371*0Sstevel@tonic-gate 	new_heap = kmem_alloc(sizeof (cyc_index_t) * new_size, KM_SLEEP);
1372*0Sstevel@tonic-gate 	new_cyclics = kmem_zalloc(sizeof (cyclic_t) * new_size, KM_SLEEP);
1373*0Sstevel@tonic-gate 
1374*0Sstevel@tonic-gate 	/*
1375*0Sstevel@tonic-gate 	 * We know that no other expansions are in progress (they serialize
1376*0Sstevel@tonic-gate 	 * on cpu_lock), so we can safely read the softbuf metadata.
1377*0Sstevel@tonic-gate 	 */
1378*0Sstevel@tonic-gate 	old_hard = cpu->cyp_softbuf[0].cys_hard;
1379*0Sstevel@tonic-gate 
1380*0Sstevel@tonic-gate 	for (i = CY_LOW_LEVEL; i < CY_LOW_LEVEL + CY_SOFT_LEVELS; i++) {
1381*0Sstevel@tonic-gate 		cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[i];
1382*0Sstevel@tonic-gate 		char hard = softbuf->cys_hard;
1383*0Sstevel@tonic-gate 		cyc_pcbuffer_t *pc = &softbuf->cys_buf[hard ^ 1];
1384*0Sstevel@tonic-gate 
1385*0Sstevel@tonic-gate 		ASSERT(hard == old_hard);
1386*0Sstevel@tonic-gate 		ASSERT(hard == softbuf->cys_soft);
1387*0Sstevel@tonic-gate 		ASSERT(pc->cypc_buf == NULL);
1388*0Sstevel@tonic-gate 
1389*0Sstevel@tonic-gate 		pc->cypc_buf =
1390*0Sstevel@tonic-gate 		    kmem_alloc(sizeof (cyc_index_t) * new_size, KM_SLEEP);
1391*0Sstevel@tonic-gate 		pc->cypc_prodndx = pc->cypc_consndx = 0;
1392*0Sstevel@tonic-gate 		pc->cypc_sizemask = new_size - 1;
1393*0Sstevel@tonic-gate 	}
1394*0Sstevel@tonic-gate 
1395*0Sstevel@tonic-gate 	arg.cyx_cpu = cpu;
1396*0Sstevel@tonic-gate 	arg.cyx_heap = new_heap;
1397*0Sstevel@tonic-gate 	arg.cyx_cyclics = new_cyclics;
1398*0Sstevel@tonic-gate 	arg.cyx_size = new_size;
1399*0Sstevel@tonic-gate 
1400*0Sstevel@tonic-gate 	cpu->cyp_modify_levels = 0;
1401*0Sstevel@tonic-gate 
1402*0Sstevel@tonic-gate 	be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
1403*0Sstevel@tonic-gate 	    (cyc_func_t)cyclic_expand_xcall, &arg);
1404*0Sstevel@tonic-gate 
1405*0Sstevel@tonic-gate 	/*
1406*0Sstevel@tonic-gate 	 * Now block, waiting for the resize operation to complete.
1407*0Sstevel@tonic-gate 	 */
1408*0Sstevel@tonic-gate 	sema_p(&cpu->cyp_modify_wait);
1409*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_modify_levels == CY_SOFT_LEVELS);
1410*0Sstevel@tonic-gate 
1411*0Sstevel@tonic-gate 	/*
1412*0Sstevel@tonic-gate 	 * The operation is complete; we can now free the old buffers.
1413*0Sstevel@tonic-gate 	 */
1414*0Sstevel@tonic-gate 	for (i = CY_LOW_LEVEL; i < CY_LOW_LEVEL + CY_SOFT_LEVELS; i++) {
1415*0Sstevel@tonic-gate 		cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[i];
1416*0Sstevel@tonic-gate 		char hard = softbuf->cys_hard;
1417*0Sstevel@tonic-gate 		cyc_pcbuffer_t *pc = &softbuf->cys_buf[hard ^ 1];
1418*0Sstevel@tonic-gate 
1419*0Sstevel@tonic-gate 		ASSERT(hard == (old_hard ^ 1));
1420*0Sstevel@tonic-gate 		ASSERT(hard == softbuf->cys_soft);
1421*0Sstevel@tonic-gate 
1422*0Sstevel@tonic-gate 		if (pc->cypc_buf == NULL)
1423*0Sstevel@tonic-gate 			continue;
1424*0Sstevel@tonic-gate 
1425*0Sstevel@tonic-gate 		ASSERT(pc->cypc_sizemask == ((new_size - 1) >> 1));
1426*0Sstevel@tonic-gate 
1427*0Sstevel@tonic-gate 		kmem_free(pc->cypc_buf,
1428*0Sstevel@tonic-gate 		    sizeof (cyc_index_t) * (pc->cypc_sizemask + 1));
1429*0Sstevel@tonic-gate 		pc->cypc_buf = NULL;
1430*0Sstevel@tonic-gate 	}
1431*0Sstevel@tonic-gate 
1432*0Sstevel@tonic-gate 	if (old_cyclics != NULL) {
1433*0Sstevel@tonic-gate 		ASSERT(old_heap != NULL);
1434*0Sstevel@tonic-gate 		ASSERT(old_size != 0);
1435*0Sstevel@tonic-gate 		kmem_free(old_cyclics, sizeof (cyclic_t) * old_size);
1436*0Sstevel@tonic-gate 		kmem_free(old_heap, sizeof (cyc_index_t) * old_size);
1437*0Sstevel@tonic-gate 	}
1438*0Sstevel@tonic-gate 
1439*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_EXPANDING);
1440*0Sstevel@tonic-gate 	cpu->cyp_state = CYS_ONLINE;
1441*0Sstevel@tonic-gate }
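
/*
 * An illustrative sketch of the sizing policy in cyclic_expand(),
 * above:  per-CPU resources start at CY_DEFAULT_PERCPU and double on
 * each expansion, so sizes are always powers of two and each
 * pcbuffer's sizemask is always (size - 1).  The example_ name is
 * hypothetical.
 */
#if 0
static cyc_index_t
example_next_size(cyc_index_t old_size)
{
	cyc_index_t new_size = old_size << 1;

	if (new_size == 0)			/* first expansion */
		new_size = CY_DEFAULT_PERCPU;

	ASSERT((new_size & (new_size - 1)) == 0);	/* power of two */

	return (new_size);
}
#endif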
1442*0Sstevel@tonic-gate 
1443*0Sstevel@tonic-gate /*
1444*0Sstevel@tonic-gate  * cyclic_pick_cpu() will attempt to pick a CPU according to the constraints
1445*0Sstevel@tonic-gate  * specified by the partition, bound CPU, and flags.  Additionally,
1446*0Sstevel@tonic-gate  * cyclic_pick_cpu() will not pick the avoid CPU; it will return NULL if
1447*0Sstevel@tonic-gate  * the avoid CPU is the only CPU which satisfies the constraints.
1448*0Sstevel@tonic-gate  *
1449*0Sstevel@tonic-gate  * If CYF_CPU_BOUND is set in flags, the specified CPU must be non-NULL.
1450*0Sstevel@tonic-gate  * If CYF_PART_BOUND is set in flags, the specified partition must be non-NULL.
1451*0Sstevel@tonic-gate  * If both CYF_CPU_BOUND and CYF_PART_BOUND are set, the specified CPU must
1452*0Sstevel@tonic-gate  * be in the specified partition.
1453*0Sstevel@tonic-gate  */
1454*0Sstevel@tonic-gate static cyc_cpu_t *
1455*0Sstevel@tonic-gate cyclic_pick_cpu(cpupart_t *part, cpu_t *bound, cpu_t *avoid, uint16_t flags)
1456*0Sstevel@tonic-gate {
1457*0Sstevel@tonic-gate 	cpu_t *c, *start = (part != NULL) ? part->cp_cpulist : CPU;
1458*0Sstevel@tonic-gate 	cpu_t *online = NULL;
1459*0Sstevel@tonic-gate 	uintptr_t offset;
1460*0Sstevel@tonic-gate 
1461*0Sstevel@tonic-gate 	CYC_PTRACE("pick-cpu", part, bound);
1462*0Sstevel@tonic-gate 
1463*0Sstevel@tonic-gate 	ASSERT(!(flags & CYF_CPU_BOUND) || bound != NULL);
1464*0Sstevel@tonic-gate 	ASSERT(!(flags & CYF_PART_BOUND) || part != NULL);
1465*0Sstevel@tonic-gate 
1466*0Sstevel@tonic-gate 	/*
1467*0Sstevel@tonic-gate 	 * If we're bound to our CPU, there isn't much choice involved.  We
1468*0Sstevel@tonic-gate 	 * need to check that the CPU passed as bound is in the cpupart, and
1469*0Sstevel@tonic-gate 	 * that the CPU that we're binding to has been configured.
1470*0Sstevel@tonic-gate 	 */
1471*0Sstevel@tonic-gate 	if (flags & CYF_CPU_BOUND) {
1472*0Sstevel@tonic-gate 		CYC_PTRACE("pick-cpu-bound", bound, avoid);
1473*0Sstevel@tonic-gate 
1474*0Sstevel@tonic-gate 		if ((flags & CYF_PART_BOUND) && bound->cpu_part != part)
1475*0Sstevel@tonic-gate 			panic("cyclic_pick_cpu:  "
1476*0Sstevel@tonic-gate 			    "CPU binding contradicts partition binding");
1477*0Sstevel@tonic-gate 
1478*0Sstevel@tonic-gate 		if (bound == avoid)
1479*0Sstevel@tonic-gate 			return (NULL);
1480*0Sstevel@tonic-gate 
1481*0Sstevel@tonic-gate 		if (bound->cpu_cyclic == NULL)
1482*0Sstevel@tonic-gate 			panic("cyclic_pick_cpu:  "
1483*0Sstevel@tonic-gate 			    "attempt to bind to non-configured CPU");
1484*0Sstevel@tonic-gate 
1485*0Sstevel@tonic-gate 		return (bound->cpu_cyclic);
1486*0Sstevel@tonic-gate 	}
1487*0Sstevel@tonic-gate 
1488*0Sstevel@tonic-gate 	if (flags & CYF_PART_BOUND) {
1489*0Sstevel@tonic-gate 		CYC_PTRACE("pick-part-bound", bound, avoid);
1490*0Sstevel@tonic-gate 		offset = offsetof(cpu_t, cpu_next_part);
1491*0Sstevel@tonic-gate 	} else {
1492*0Sstevel@tonic-gate 		offset = offsetof(cpu_t, cpu_next_onln);
1493*0Sstevel@tonic-gate 	}
1494*0Sstevel@tonic-gate 
1495*0Sstevel@tonic-gate 	c = start;
1496*0Sstevel@tonic-gate 	do {
1497*0Sstevel@tonic-gate 		if (c->cpu_cyclic == NULL)
1498*0Sstevel@tonic-gate 			continue;
1499*0Sstevel@tonic-gate 
1500*0Sstevel@tonic-gate 		if (c->cpu_cyclic->cyp_state == CYS_OFFLINE)
1501*0Sstevel@tonic-gate 			continue;
1502*0Sstevel@tonic-gate 
1503*0Sstevel@tonic-gate 		if (c == avoid)
1504*0Sstevel@tonic-gate 			continue;
1505*0Sstevel@tonic-gate 
1506*0Sstevel@tonic-gate 		if (c->cpu_flags & CPU_ENABLE)
1507*0Sstevel@tonic-gate 			goto found;
1508*0Sstevel@tonic-gate 
1509*0Sstevel@tonic-gate 		if (online == NULL)
1510*0Sstevel@tonic-gate 			online = c;
1511*0Sstevel@tonic-gate 	} while ((c = *(cpu_t **)((uintptr_t)c + offset)) != start);
1512*0Sstevel@tonic-gate 
1513*0Sstevel@tonic-gate 	/*
1514*0Sstevel@tonic-gate 	 * If we're here, we're in one of two situations:
1515*0Sstevel@tonic-gate 	 *
1516*0Sstevel@tonic-gate 	 *  (a)	We have a partition-bound cyclic, and there is no CPU in
1517*0Sstevel@tonic-gate 	 *	our partition which is CPU_ENABLE'd.  If we saw another
1518*0Sstevel@tonic-gate 	 *	non-CYS_OFFLINE CPU in our partition, we'll go with it.
1519*0Sstevel@tonic-gate 	 *	If not, the avoid CPU must be the only non-CYS_OFFLINE
1520*0Sstevel@tonic-gate 	 *	CPU in the partition; we're forced to return NULL.
1521*0Sstevel@tonic-gate 	 *
1522*0Sstevel@tonic-gate 	 *  (b)	We have a partition-unbound cyclic, in which case there
1523*0Sstevel@tonic-gate 	 *	must be only one CPU_ENABLE'd CPU, and it must be the one
1524*0Sstevel@tonic-gate 	 *	we're trying to avoid.  If cyclic_juggle()/cyclic_offline()
1525*0Sstevel@tonic-gate 	 *	are called appropriately, this generally shouldn't happen
1526*0Sstevel@tonic-gate 	 *	(the offline should fail before getting to this code).
1527*0Sstevel@tonic-gate 	 *	At any rate: we can't avoid the avoid CPU, so we return
1528*0Sstevel@tonic-gate 	 *	NULL.
1529*0Sstevel@tonic-gate 	 */
1530*0Sstevel@tonic-gate 	if (!(flags & CYF_PART_BOUND)) {
1531*0Sstevel@tonic-gate 		ASSERT(avoid->cpu_flags & CPU_ENABLE);
1532*0Sstevel@tonic-gate 		return (NULL);
1533*0Sstevel@tonic-gate 	}
1534*0Sstevel@tonic-gate 
1535*0Sstevel@tonic-gate 	CYC_PTRACE("pick-no-intr", part, avoid);
1536*0Sstevel@tonic-gate 
1537*0Sstevel@tonic-gate 	if ((c = online) != NULL)
1538*0Sstevel@tonic-gate 		goto found;
1539*0Sstevel@tonic-gate 
1540*0Sstevel@tonic-gate 	CYC_PTRACE("pick-fail", part, avoid);
1541*0Sstevel@tonic-gate 	ASSERT(avoid->cpu_part == start->cpu_part);
1542*0Sstevel@tonic-gate 	return (NULL);
1543*0Sstevel@tonic-gate 
1544*0Sstevel@tonic-gate found:
1545*0Sstevel@tonic-gate 	CYC_PTRACE("pick-cpu-found", c, avoid);
1546*0Sstevel@tonic-gate 	ASSERT(c != avoid);
1547*0Sstevel@tonic-gate 	ASSERT(c->cpu_cyclic != NULL);
1548*0Sstevel@tonic-gate 
1549*0Sstevel@tonic-gate 	return (c->cpu_cyclic);
1550*0Sstevel@tonic-gate }
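
/*
 * An illustrative sketch of the offsetof()-based traversal used by
 * cyclic_pick_cpu(), above:  by selecting the byte offset of the
 * desired linkage field up front (e.g. offsetof(cpu_t,
 * cpu_next_part) versus offsetof(cpu_t, cpu_next_onln)), a single
 * loop can walk either ring without duplicating its body.  The
 * example_ name and the match predicate are hypothetical.
 */
#if 0
static cpu_t *
example_walk_ring(cpu_t *start, uintptr_t offset, int (*match)(cpu_t *))
{
	cpu_t *c = start;

	do {
		if (match(c))
			return (c);
	} while ((c = *(cpu_t **)((uintptr_t)c + offset)) != start);

	return (NULL);
}
#endif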
1551*0Sstevel@tonic-gate 
1552*0Sstevel@tonic-gate static void
1553*0Sstevel@tonic-gate cyclic_add_xcall(cyc_xcallarg_t *arg)
1554*0Sstevel@tonic-gate {
1555*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = arg->cyx_cpu;
1556*0Sstevel@tonic-gate 	cyc_handler_t *hdlr = arg->cyx_hdlr;
1557*0Sstevel@tonic-gate 	cyc_time_t *when = arg->cyx_when;
1558*0Sstevel@tonic-gate 	cyc_backend_t *be = cpu->cyp_backend;
1559*0Sstevel@tonic-gate 	cyc_index_t ndx, nelems;
1560*0Sstevel@tonic-gate 	cyc_cookie_t cookie;
1561*0Sstevel@tonic-gate 	cyb_arg_t bar = be->cyb_arg;
1562*0Sstevel@tonic-gate 	cyclic_t *cyclic;
1563*0Sstevel@tonic-gate 
1564*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_nelems < cpu->cyp_size);
1565*0Sstevel@tonic-gate 
1566*0Sstevel@tonic-gate 	cookie = be->cyb_set_level(bar, CY_HIGH_LEVEL);
1567*0Sstevel@tonic-gate 
1568*0Sstevel@tonic-gate 	CYC_TRACE(cpu, CY_HIGH_LEVEL,
1569*0Sstevel@tonic-gate 	    "add-xcall", when->cyt_when, when->cyt_interval);
1570*0Sstevel@tonic-gate 
1571*0Sstevel@tonic-gate 	nelems = cpu->cyp_nelems++;
1572*0Sstevel@tonic-gate 
1573*0Sstevel@tonic-gate 	if (nelems == 0) {
1574*0Sstevel@tonic-gate 		/*
1575*0Sstevel@tonic-gate 		 * If this is the first element, we need to enable the
1576*0Sstevel@tonic-gate 		 * backend on this CPU.
1577*0Sstevel@tonic-gate 		 */
1578*0Sstevel@tonic-gate 		CYC_TRACE0(cpu, CY_HIGH_LEVEL, "enabled");
1579*0Sstevel@tonic-gate 		be->cyb_enable(bar);
1580*0Sstevel@tonic-gate 	}
1581*0Sstevel@tonic-gate 
1582*0Sstevel@tonic-gate 	ndx = cpu->cyp_heap[nelems];
1583*0Sstevel@tonic-gate 	cyclic = &cpu->cyp_cyclics[ndx];
1584*0Sstevel@tonic-gate 
1585*0Sstevel@tonic-gate 	ASSERT(cyclic->cy_flags == CYF_FREE);
1586*0Sstevel@tonic-gate 	cyclic->cy_interval = when->cyt_interval;
1587*0Sstevel@tonic-gate 
1588*0Sstevel@tonic-gate 	if (when->cyt_when == 0) {
1589*0Sstevel@tonic-gate 		/*
1590*0Sstevel@tonic-gate 		 * If a start time hasn't been explicitly specified, we'll
1591*0Sstevel@tonic-gate 		 * start on the next interval boundary.
1592*0Sstevel@tonic-gate 		 */
1593*0Sstevel@tonic-gate 		cyclic->cy_expire = (gethrtime() / cyclic->cy_interval + 1) *
1594*0Sstevel@tonic-gate 		    cyclic->cy_interval;
1595*0Sstevel@tonic-gate 	} else {
1596*0Sstevel@tonic-gate 		cyclic->cy_expire = when->cyt_when;
1597*0Sstevel@tonic-gate 	}
1598*0Sstevel@tonic-gate 
1599*0Sstevel@tonic-gate 	cyclic->cy_handler = hdlr->cyh_func;
1600*0Sstevel@tonic-gate 	cyclic->cy_arg = hdlr->cyh_arg;
1601*0Sstevel@tonic-gate 	cyclic->cy_level = hdlr->cyh_level;
1602*0Sstevel@tonic-gate 	cyclic->cy_flags = arg->cyx_flags;
1603*0Sstevel@tonic-gate 
1604*0Sstevel@tonic-gate 	if (cyclic_upheap(cpu, nelems)) {
1605*0Sstevel@tonic-gate 		hrtime_t exp = cyclic->cy_expire;
1606*0Sstevel@tonic-gate 
1607*0Sstevel@tonic-gate 		CYC_TRACE(cpu, CY_HIGH_LEVEL, "add-reprog", cyclic, exp);
1608*0Sstevel@tonic-gate 
1609*0Sstevel@tonic-gate 		/*
1610*0Sstevel@tonic-gate 		 * If our upheap propagated to the root, we need to
1611*0Sstevel@tonic-gate 		 * reprogram the interrupt source.
1612*0Sstevel@tonic-gate 		 */
1613*0Sstevel@tonic-gate 		be->cyb_reprogram(bar, exp);
1614*0Sstevel@tonic-gate 	}
1615*0Sstevel@tonic-gate 	be->cyb_restore_level(bar, cookie);
1616*0Sstevel@tonic-gate 
1617*0Sstevel@tonic-gate 	arg->cyx_ndx = ndx;
1618*0Sstevel@tonic-gate }
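
/*
 * An illustrative restatement of the default start-time computation
 * in cyclic_add_xcall(), above:  when cyt_when is 0, the first
 * expiration is gethrtime() rounded up to the next multiple of the
 * interval.  E.g., with now = 123 and interval = 50,
 * (123 / 50 + 1) * 50 == 150, the next interval boundary.  The
 * example_ name is hypothetical.
 */
#if 0
static hrtime_t
example_first_expire(hrtime_t now, hrtime_t interval)
{
	return ((now / interval + 1) * interval);
}
#endif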
1619*0Sstevel@tonic-gate 
1620*0Sstevel@tonic-gate static cyc_index_t
1621*0Sstevel@tonic-gate cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr,
1622*0Sstevel@tonic-gate     cyc_time_t *when, uint16_t flags)
1623*0Sstevel@tonic-gate {
1624*0Sstevel@tonic-gate 	cyc_backend_t *be = cpu->cyp_backend;
1625*0Sstevel@tonic-gate 	cyb_arg_t bar = be->cyb_arg;
1626*0Sstevel@tonic-gate 	cyc_xcallarg_t arg;
1627*0Sstevel@tonic-gate 
1628*0Sstevel@tonic-gate 	CYC_PTRACE("add-cpu", cpu, hdlr->cyh_func);
1629*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
1630*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_ONLINE);
1631*0Sstevel@tonic-gate 	ASSERT(!(cpu->cyp_cpu->cpu_flags & CPU_OFFLINE));
1632*0Sstevel@tonic-gate 	ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);
1633*0Sstevel@tonic-gate 
1634*0Sstevel@tonic-gate 	if (cpu->cyp_nelems == cpu->cyp_size) {
1635*0Sstevel@tonic-gate 		/*
1636*0Sstevel@tonic-gate 		 * This is expensive; it will cross call onto the other
1637*0Sstevel@tonic-gate 		 * CPU to perform the expansion.
1638*0Sstevel@tonic-gate 		 */
1639*0Sstevel@tonic-gate 		cyclic_expand(cpu);
1640*0Sstevel@tonic-gate 		ASSERT(cpu->cyp_nelems < cpu->cyp_size);
1641*0Sstevel@tonic-gate 	}
1642*0Sstevel@tonic-gate 
1643*0Sstevel@tonic-gate 	/*
1644*0Sstevel@tonic-gate 	 * By now, we know that we're going to be able to successfully
1645*0Sstevel@tonic-gate 	 * perform the add.  Now cross call over to the CPU of interest to
1646*0Sstevel@tonic-gate 	 * actually add our cyclic.
1647*0Sstevel@tonic-gate 	 */
1648*0Sstevel@tonic-gate 	arg.cyx_cpu = cpu;
1649*0Sstevel@tonic-gate 	arg.cyx_hdlr = hdlr;
1650*0Sstevel@tonic-gate 	arg.cyx_when = when;
1651*0Sstevel@tonic-gate 	arg.cyx_flags = flags;
1652*0Sstevel@tonic-gate 
1653*0Sstevel@tonic-gate 	be->cyb_xcall(bar, cpu->cyp_cpu, (cyc_func_t)cyclic_add_xcall, &arg);
1654*0Sstevel@tonic-gate 
1655*0Sstevel@tonic-gate 	CYC_PTRACE("add-cpu-done", cpu, arg.cyx_ndx);
1656*0Sstevel@tonic-gate 
1657*0Sstevel@tonic-gate 	return (arg.cyx_ndx);
1658*0Sstevel@tonic-gate }
1659*0Sstevel@tonic-gate 
1660*0Sstevel@tonic-gate static void
1661*0Sstevel@tonic-gate cyclic_remove_xcall(cyc_xcallarg_t *arg)
1662*0Sstevel@tonic-gate {
1663*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = arg->cyx_cpu;
1664*0Sstevel@tonic-gate 	cyc_backend_t *be = cpu->cyp_backend;
1665*0Sstevel@tonic-gate 	cyb_arg_t bar = be->cyb_arg;
1666*0Sstevel@tonic-gate 	cyc_cookie_t cookie;
1667*0Sstevel@tonic-gate 	cyc_index_t ndx = arg->cyx_ndx, nelems = cpu->cyp_nelems, i;
1668*0Sstevel@tonic-gate 	cyc_index_t *heap = cpu->cyp_heap, last;
1669*0Sstevel@tonic-gate 	cyclic_t *cyclic;
1670*0Sstevel@tonic-gate #ifdef DEBUG
1671*0Sstevel@tonic-gate 	cyc_index_t root;
1672*0Sstevel@tonic-gate #endif
1673*0Sstevel@tonic-gate 
1674*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_REMOVING);
1675*0Sstevel@tonic-gate 	ASSERT(nelems > 0);
1676*0Sstevel@tonic-gate 
1677*0Sstevel@tonic-gate 	cookie = be->cyb_set_level(bar, CY_HIGH_LEVEL);
1678*0Sstevel@tonic-gate 
1679*0Sstevel@tonic-gate 	CYC_TRACE1(cpu, CY_HIGH_LEVEL, "remove-xcall", ndx);
1680*0Sstevel@tonic-gate 
1681*0Sstevel@tonic-gate 	cyclic = &cpu->cyp_cyclics[ndx];
1682*0Sstevel@tonic-gate 
1683*0Sstevel@tonic-gate 	/*
1684*0Sstevel@tonic-gate 	 * Grab the current expiration time.  If this cyclic is being
1685*0Sstevel@tonic-gate 	 * removed as part of a juggling operation, the expiration time
1686*0Sstevel@tonic-gate 	 * will be used when the cyclic is added to the new CPU.
1687*0Sstevel@tonic-gate 	 */
1688*0Sstevel@tonic-gate 	if (arg->cyx_when != NULL) {
1689*0Sstevel@tonic-gate 		arg->cyx_when->cyt_when = cyclic->cy_expire;
1690*0Sstevel@tonic-gate 		arg->cyx_when->cyt_interval = cyclic->cy_interval;
1691*0Sstevel@tonic-gate 	}
1692*0Sstevel@tonic-gate 
1693*0Sstevel@tonic-gate 	if (cyclic->cy_pend != 0) {
1694*0Sstevel@tonic-gate 		/*
1695*0Sstevel@tonic-gate 		 * The pend is non-zero; this cyclic is currently being
1696*0Sstevel@tonic-gate 		 * executed (or will be executed shortly).  If the caller
1697*0Sstevel@tonic-gate 		 * refuses to wait, we must return (doing nothing).  Otherwise,
1698*0Sstevel@tonic-gate 		 * we will stash the pend value in this CPU's rpend, and
1699*0Sstevel@tonic-gate 		 * then zero it out.  The softint in the pend loop will see
1700*0Sstevel@tonic-gate 		 * that we have zeroed out pend, and will call the cyclic
1701*0Sstevel@tonic-gate 		 * handler rpend times.  The caller will wait until the
1702*0Sstevel@tonic-gate 		 * softint has completed calling the cyclic handler.
1703*0Sstevel@tonic-gate 		 */
1704*0Sstevel@tonic-gate 		if (arg->cyx_wait == CY_NOWAIT) {
1705*0Sstevel@tonic-gate 			arg->cyx_wait = CY_WAIT;
1706*0Sstevel@tonic-gate 			goto out;
1707*0Sstevel@tonic-gate 		}
1708*0Sstevel@tonic-gate 
1709*0Sstevel@tonic-gate 		ASSERT(cyclic->cy_level != CY_HIGH_LEVEL);
1710*0Sstevel@tonic-gate 		CYC_TRACE1(cpu, CY_HIGH_LEVEL, "remove-pend", cyclic->cy_pend);
1711*0Sstevel@tonic-gate 		cpu->cyp_rpend = cyclic->cy_pend;
1712*0Sstevel@tonic-gate 		cyclic->cy_pend = 0;
1713*0Sstevel@tonic-gate 	}
1714*0Sstevel@tonic-gate 
1715*0Sstevel@tonic-gate 	/*
1716*0Sstevel@tonic-gate 	 * Now set the flags to CYF_FREE.  We don't need a membar_enter()
1717*0Sstevel@tonic-gate 	 * between zeroing pend and setting the flags because we're at
1718*0Sstevel@tonic-gate 	 * CY_HIGH_LEVEL (that is, the zeroing of pend and the setting
1719*0Sstevel@tonic-gate 	 * of cy_flags appear atomic to softints).
1720*0Sstevel@tonic-gate 	 */
1721*0Sstevel@tonic-gate 	cyclic->cy_flags = CYF_FREE;
1722*0Sstevel@tonic-gate 
1723*0Sstevel@tonic-gate 	for (i = 0; i < nelems; i++) {
1724*0Sstevel@tonic-gate 		if (heap[i] == ndx)
1725*0Sstevel@tonic-gate 			break;
1726*0Sstevel@tonic-gate 	}
1727*0Sstevel@tonic-gate 
1728*0Sstevel@tonic-gate 	if (i == nelems)
1729*0Sstevel@tonic-gate 		panic("attempt to remove non-existent cyclic");
1730*0Sstevel@tonic-gate 
1731*0Sstevel@tonic-gate 	cpu->cyp_nelems = --nelems;
1732*0Sstevel@tonic-gate 
1733*0Sstevel@tonic-gate 	if (nelems == 0) {
1734*0Sstevel@tonic-gate 		/*
1735*0Sstevel@tonic-gate 		 * If we just removed the last element, then we need to
1736*0Sstevel@tonic-gate 		 * disable the backend on this CPU.
1737*0Sstevel@tonic-gate 		 */
1738*0Sstevel@tonic-gate 		CYC_TRACE0(cpu, CY_HIGH_LEVEL, "disabled");
1739*0Sstevel@tonic-gate 		be->cyb_disable(bar);
1740*0Sstevel@tonic-gate 	}
1741*0Sstevel@tonic-gate 
1742*0Sstevel@tonic-gate 	if (i == nelems) {
1743*0Sstevel@tonic-gate 		/*
1744*0Sstevel@tonic-gate 		 * If we just removed the last element of the heap, then
1745*0Sstevel@tonic-gate 		 * we don't have to downheap.
1746*0Sstevel@tonic-gate 		 */
1747*0Sstevel@tonic-gate 		CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-bottom");
1748*0Sstevel@tonic-gate 		goto out;
1749*0Sstevel@tonic-gate 	}
1750*0Sstevel@tonic-gate 
1751*0Sstevel@tonic-gate #ifdef DEBUG
1752*0Sstevel@tonic-gate 	root = heap[0];
1753*0Sstevel@tonic-gate #endif
1754*0Sstevel@tonic-gate 
1755*0Sstevel@tonic-gate 	/*
1756*0Sstevel@tonic-gate 	 * Swap the last element of the heap with the one we want to
1757*0Sstevel@tonic-gate 	 * remove, and restore the heap invariant (this has the implicit
1758*0Sstevel@tonic-gate 	 * effect of putting the newly freed element on the free list).
1759*0Sstevel@tonic-gate 	 */
1760*0Sstevel@tonic-gate 	heap[i] = (last = heap[nelems]);
1761*0Sstevel@tonic-gate 	heap[nelems] = ndx;
1762*0Sstevel@tonic-gate 
1763*0Sstevel@tonic-gate 	if (i == 0) {
1764*0Sstevel@tonic-gate 		CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-root");
1765*0Sstevel@tonic-gate 		cyclic_downheap(cpu, 0);
1766*0Sstevel@tonic-gate 	} else {
1767*0Sstevel@tonic-gate 		if (cyclic_upheap(cpu, i) == 0) {
1768*0Sstevel@tonic-gate 			/*
1769*0Sstevel@tonic-gate 			 * The upheap didn't propagate to the root; if it
1770*0Sstevel@tonic-gate 			 * didn't propagate at all, we need to downheap.
1771*0Sstevel@tonic-gate 			 */
1772*0Sstevel@tonic-gate 			CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-no-root");
1773*0Sstevel@tonic-gate 			if (heap[i] == last) {
1774*0Sstevel@tonic-gate 				CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-no-up");
1775*0Sstevel@tonic-gate 				cyclic_downheap(cpu, i);
1776*0Sstevel@tonic-gate 			}
1777*0Sstevel@tonic-gate 			ASSERT(heap[0] == root);
1778*0Sstevel@tonic-gate 			goto out;
1779*0Sstevel@tonic-gate 		}
1780*0Sstevel@tonic-gate 	}
1781*0Sstevel@tonic-gate 
1782*0Sstevel@tonic-gate 	/*
1783*0Sstevel@tonic-gate 	 * We're here because we changed the root; we need to reprogram
1784*0Sstevel@tonic-gate 	 * the clock source.
1785*0Sstevel@tonic-gate 	 */
1786*0Sstevel@tonic-gate 	cyclic = &cpu->cyp_cyclics[heap[0]];
1787*0Sstevel@tonic-gate 
1788*0Sstevel@tonic-gate 	CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-reprog");
1789*0Sstevel@tonic-gate 
1790*0Sstevel@tonic-gate 	ASSERT(nelems != 0);
1791*0Sstevel@tonic-gate 	be->cyb_reprogram(bar, cyclic->cy_expire);
1792*0Sstevel@tonic-gate out:
1793*0Sstevel@tonic-gate 	be->cyb_restore_level(bar, cookie);
1794*0Sstevel@tonic-gate }
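
/*
 * An illustrative sketch of the heap deletion performed by
 * cyclic_remove_xcall(), above, assuming the victim sits at heap slot
 * i with i < nelems after the decrement:  the last element is moved
 * into the hole, the victim's index is parked just past the end of
 * the heap (which is what frees it), and the displaced element is
 * sifted up or down to restore the heap invariant.  (Unconditionally
 * downheaping after a partial upheap is a harmless no-op; the real
 * code also disables the backend when the heap empties and reprograms
 * it when the root changes.)  The example_ name is hypothetical.
 */
#if 0
static void
example_heap_delete(cyc_cpu_t *cpu, cyc_index_t i)
{
	cyc_index_t *heap = cpu->cyp_heap;
	cyc_index_t nelems = --cpu->cyp_nelems;
	cyc_index_t victim = heap[i];

	heap[i] = heap[nelems];		/* move last element into the hole */
	heap[nelems] = victim;		/* implicitly frees the victim */

	if (i == 0 || cyclic_upheap(cpu, i) == 0)
		cyclic_downheap(cpu, i);
}
#endif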
1795*0Sstevel@tonic-gate 
1796*0Sstevel@tonic-gate static int
1797*0Sstevel@tonic-gate cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait)
1798*0Sstevel@tonic-gate {
1799*0Sstevel@tonic-gate 	cyc_backend_t *be = cpu->cyp_backend;
1800*0Sstevel@tonic-gate 	cyc_xcallarg_t arg;
1801*0Sstevel@tonic-gate 	cyclic_t *cyclic = &cpu->cyp_cyclics[ndx];
1802*0Sstevel@tonic-gate 	cyc_level_t level = cyclic->cy_level;
1803*0Sstevel@tonic-gate 
1804*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
1805*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_rpend == 0);
1806*0Sstevel@tonic-gate 	ASSERT(wait == CY_WAIT || wait == CY_NOWAIT);
1807*0Sstevel@tonic-gate 
1808*0Sstevel@tonic-gate 	arg.cyx_ndx = ndx;
1809*0Sstevel@tonic-gate 	arg.cyx_cpu = cpu;
1810*0Sstevel@tonic-gate 	arg.cyx_when = when;
1811*0Sstevel@tonic-gate 	arg.cyx_wait = wait;
1812*0Sstevel@tonic-gate 
1813*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_ONLINE);
1814*0Sstevel@tonic-gate 	cpu->cyp_state = CYS_REMOVING;
1815*0Sstevel@tonic-gate 
1816*0Sstevel@tonic-gate 	be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
1817*0Sstevel@tonic-gate 	    (cyc_func_t)cyclic_remove_xcall, &arg);
1818*0Sstevel@tonic-gate 
1819*0Sstevel@tonic-gate 	/*
1820*0Sstevel@tonic-gate 	 * If the cyclic we removed wasn't at CY_HIGH_LEVEL, then we need to
1821*0Sstevel@tonic-gate 	 * check the cyp_rpend.  If it's non-zero, then we need to wait here
1822*0Sstevel@tonic-gate 	 * for all pending cyclic handlers to run.
1823*0Sstevel@tonic-gate 	 */
1824*0Sstevel@tonic-gate 	ASSERT(!(level == CY_HIGH_LEVEL && cpu->cyp_rpend != 0));
1825*0Sstevel@tonic-gate 	ASSERT(!(wait == CY_NOWAIT && cpu->cyp_rpend != 0));
1826*0Sstevel@tonic-gate 	ASSERT(!(arg.cyx_wait == CY_NOWAIT && cpu->cyp_rpend != 0));
1827*0Sstevel@tonic-gate 
1828*0Sstevel@tonic-gate 	if (wait != arg.cyx_wait) {
1829*0Sstevel@tonic-gate 		/*
1830*0Sstevel@tonic-gate 		 * We are being told that we must wait if we want to
1831*0Sstevel@tonic-gate 		 * remove this cyclic; put the CPU back in the CYS_ONLINE
1832*0Sstevel@tonic-gate 		 * state and return failure.
1833*0Sstevel@tonic-gate 		 */
1834*0Sstevel@tonic-gate 		ASSERT(wait == CY_NOWAIT && arg.cyx_wait == CY_WAIT);
1835*0Sstevel@tonic-gate 		ASSERT(cpu->cyp_state == CYS_REMOVING);
1836*0Sstevel@tonic-gate 		cpu->cyp_state = CYS_ONLINE;
1837*0Sstevel@tonic-gate 
1838*0Sstevel@tonic-gate 		return (0);
1839*0Sstevel@tonic-gate 	}
1840*0Sstevel@tonic-gate 
1841*0Sstevel@tonic-gate 	if (cpu->cyp_rpend != 0)
1842*0Sstevel@tonic-gate 		sema_p(&cpu->cyp_modify_wait);
1843*0Sstevel@tonic-gate 
1844*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_REMOVING);
1845*0Sstevel@tonic-gate 
1846*0Sstevel@tonic-gate 	cpu->cyp_rpend = 0;
1847*0Sstevel@tonic-gate 	cpu->cyp_state = CYS_ONLINE;
1848*0Sstevel@tonic-gate 
1849*0Sstevel@tonic-gate 	return (1);
1850*0Sstevel@tonic-gate }
1851*0Sstevel@tonic-gate 
1852*0Sstevel@tonic-gate /*
1853*0Sstevel@tonic-gate  * cyclic_juggle_one_to() should only be called when the source cyclic
1854*0Sstevel@tonic-gate  * can be juggled and the destination CPU is known to be able to accept
1855*0Sstevel@tonic-gate  * it.
1856*0Sstevel@tonic-gate  */
1857*0Sstevel@tonic-gate static void
1858*0Sstevel@tonic-gate cyclic_juggle_one_to(cyc_id_t *idp, cyc_cpu_t *dest)
1859*0Sstevel@tonic-gate {
1860*0Sstevel@tonic-gate 	cyc_cpu_t *src = idp->cyi_cpu;
1861*0Sstevel@tonic-gate 	cyc_index_t ndx = idp->cyi_ndx;
1862*0Sstevel@tonic-gate 	cyc_time_t when;
1863*0Sstevel@tonic-gate 	cyc_handler_t hdlr;
1864*0Sstevel@tonic-gate 	cyclic_t *cyclic;
1865*0Sstevel@tonic-gate 	uint16_t flags;
1866*0Sstevel@tonic-gate 	hrtime_t delay;
1867*0Sstevel@tonic-gate 
1868*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
1869*0Sstevel@tonic-gate 	ASSERT(src != NULL && idp->cyi_omni_list == NULL);
1870*0Sstevel@tonic-gate 	ASSERT(!(dest->cyp_cpu->cpu_flags & (CPU_QUIESCED | CPU_OFFLINE)));
1871*0Sstevel@tonic-gate 	CYC_PTRACE("juggle-one-to", idp, dest);
1872*0Sstevel@tonic-gate 
1873*0Sstevel@tonic-gate 	cyclic = &src->cyp_cyclics[ndx];
1874*0Sstevel@tonic-gate 
1875*0Sstevel@tonic-gate 	flags = cyclic->cy_flags;
1876*0Sstevel@tonic-gate 	ASSERT(!(flags & CYF_CPU_BOUND) && !(flags & CYF_FREE));
1877*0Sstevel@tonic-gate 
1878*0Sstevel@tonic-gate 	hdlr.cyh_func = cyclic->cy_handler;
1879*0Sstevel@tonic-gate 	hdlr.cyh_level = cyclic->cy_level;
1880*0Sstevel@tonic-gate 	hdlr.cyh_arg = cyclic->cy_arg;
1881*0Sstevel@tonic-gate 
1882*0Sstevel@tonic-gate 	/*
1883*0Sstevel@tonic-gate 	 * Before we begin the juggling process, see if the destination
1884*0Sstevel@tonic-gate 	 * CPU requires an expansion.  If it does, we'll perform the
1885*0Sstevel@tonic-gate 	 * expansion before removing the cyclic.  This is to prevent us
1886*0Sstevel@tonic-gate 	 * from blocking while a system-critical cyclic (notably, the clock
1887*0Sstevel@tonic-gate 	 * cyclic) isn't on a CPU.
1888*0Sstevel@tonic-gate 	 */
1889*0Sstevel@tonic-gate 	if (dest->cyp_nelems == dest->cyp_size) {
1890*0Sstevel@tonic-gate 		CYC_PTRACE("remove-expand", idp, dest);
1891*0Sstevel@tonic-gate 		cyclic_expand(dest);
1892*0Sstevel@tonic-gate 		ASSERT(dest->cyp_nelems < dest->cyp_size);
1893*0Sstevel@tonic-gate 	}
1894*0Sstevel@tonic-gate 
1895*0Sstevel@tonic-gate 	/*
1896*0Sstevel@tonic-gate 	 * Remove the cyclic from the source.  As mentioned above, we cannot
1897*0Sstevel@tonic-gate 	 * block during this operation; if we cannot remove the cyclic
1898*0Sstevel@tonic-gate 	 * without waiting, we spin for a time shorter than the interval, and
1899*0Sstevel@tonic-gate 	 * reattempt the (non-blocking) removal.  If we continue to fail,
1900*0Sstevel@tonic-gate 	 * we will exponentially back off (up to half of the interval).
1901*0Sstevel@tonic-gate 	 * Note that the removal will ultimately succeed -- even if the
1902*0Sstevel@tonic-gate 	 * cyclic handler is blocked on a resource held by a thread which we
1903*0Sstevel@tonic-gate 	 * have preempted, priority inheritance assures that the preempted
1904*0Sstevel@tonic-gate 	 * thread will preempt us and continue to progress.
1905*0Sstevel@tonic-gate 	 */
1906*0Sstevel@tonic-gate 	for (delay = NANOSEC / MICROSEC; ; delay <<= 1) {
1907*0Sstevel@tonic-gate 		/*
1908*0Sstevel@tonic-gate 		 * Before we begin this operation, disable kernel preemption.
1909*0Sstevel@tonic-gate 		 */
1910*0Sstevel@tonic-gate 		kpreempt_disable();
1911*0Sstevel@tonic-gate 		if (cyclic_remove_here(src, ndx, &when, CY_NOWAIT))
1912*0Sstevel@tonic-gate 			break;
1913*0Sstevel@tonic-gate 
1914*0Sstevel@tonic-gate 		/*
1915*0Sstevel@tonic-gate 		 * The operation failed; enable kernel preemption while
1916*0Sstevel@tonic-gate 		 * spinning.
1917*0Sstevel@tonic-gate 		 */
1918*0Sstevel@tonic-gate 		kpreempt_enable();
1919*0Sstevel@tonic-gate 
1920*0Sstevel@tonic-gate 		CYC_PTRACE("remove-retry", idp, src);
1921*0Sstevel@tonic-gate 
1922*0Sstevel@tonic-gate 		if (delay > (cyclic->cy_interval >> 1))
1923*0Sstevel@tonic-gate 			delay = cyclic->cy_interval >> 1;
1924*0Sstevel@tonic-gate 
1925*0Sstevel@tonic-gate 		drv_usecwait((clock_t)(delay / (NANOSEC / MICROSEC)));
1926*0Sstevel@tonic-gate 	}
1927*0Sstevel@tonic-gate 
1928*0Sstevel@tonic-gate 	/*
1929*0Sstevel@tonic-gate 	 * Now add the cyclic to the destination.  This won't block; we
1930*0Sstevel@tonic-gate 	 * performed any necessary (blocking) expansion of the destination
1931*0Sstevel@tonic-gate 	 * CPU before removing the cyclic from the source CPU.
1932*0Sstevel@tonic-gate 	 */
1933*0Sstevel@tonic-gate 	idp->cyi_ndx = cyclic_add_here(dest, &hdlr, &when, flags);
1934*0Sstevel@tonic-gate 	idp->cyi_cpu = dest;
1935*0Sstevel@tonic-gate 	kpreempt_enable();
1936*0Sstevel@tonic-gate }
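
/*
 * An illustrative sketch of the retry policy above:  the removal
 * delay doubles on each failed non-blocking attempt, starting at one
 * microsecond and clamped to half of the cyclic's interval, so a
 * removal is attempted at least twice per interval.  The example_
 * name is hypothetical.
 */
#if 0
static hrtime_t
example_next_delay(hrtime_t delay, hrtime_t interval)
{
	delay <<= 1;

	if (delay > (interval >> 1))
		delay = interval >> 1;

	return (delay);
}
#endif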
1937*0Sstevel@tonic-gate 
1938*0Sstevel@tonic-gate static int
1939*0Sstevel@tonic-gate cyclic_juggle_one(cyc_id_t *idp)
1940*0Sstevel@tonic-gate {
1941*0Sstevel@tonic-gate 	cyc_index_t ndx = idp->cyi_ndx;
1942*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = idp->cyi_cpu, *dest;
1943*0Sstevel@tonic-gate 	cyclic_t *cyclic = &cpu->cyp_cyclics[ndx];
1944*0Sstevel@tonic-gate 	cpu_t *c = cpu->cyp_cpu;
1945*0Sstevel@tonic-gate 	cpupart_t *part = c->cpu_part;
1946*0Sstevel@tonic-gate 
1947*0Sstevel@tonic-gate 	CYC_PTRACE("juggle-one", idp, cpu);
1948*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
1949*0Sstevel@tonic-gate 	ASSERT(!(c->cpu_flags & CPU_OFFLINE));
1950*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_ONLINE);
1951*0Sstevel@tonic-gate 	ASSERT(!(cyclic->cy_flags & CYF_FREE));
1952*0Sstevel@tonic-gate 
1953*0Sstevel@tonic-gate 	if ((dest = cyclic_pick_cpu(part, c, c, cyclic->cy_flags)) == NULL) {
1954*0Sstevel@tonic-gate 		/*
1955*0Sstevel@tonic-gate 		 * Bad news:  this cyclic can't be juggled.
1956*0Sstevel@tonic-gate 		 */
1957*0Sstevel@tonic-gate 		CYC_PTRACE("juggle-fail", idp, cpu);
1958*0Sstevel@tonic-gate 		return (0);
1959*0Sstevel@tonic-gate 	}
1960*0Sstevel@tonic-gate 
1961*0Sstevel@tonic-gate 	cyclic_juggle_one_to(idp, dest);
1962*0Sstevel@tonic-gate 
1963*0Sstevel@tonic-gate 	return (1);
1964*0Sstevel@tonic-gate }
1965*0Sstevel@tonic-gate 
1966*0Sstevel@tonic-gate static void
1967*0Sstevel@tonic-gate cyclic_unbind_cpu(cyclic_id_t id)
1968*0Sstevel@tonic-gate {
1969*0Sstevel@tonic-gate 	cyc_id_t *idp = (cyc_id_t *)id;
1970*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = idp->cyi_cpu;
1971*0Sstevel@tonic-gate 	cpu_t *c = cpu->cyp_cpu;
1972*0Sstevel@tonic-gate 	cyclic_t *cyclic = &cpu->cyp_cyclics[idp->cyi_ndx];
1973*0Sstevel@tonic-gate 
1974*0Sstevel@tonic-gate 	CYC_PTRACE("unbind-cpu", id, cpu);
1975*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
1976*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_ONLINE);
1977*0Sstevel@tonic-gate 	ASSERT(!(cyclic->cy_flags & CYF_FREE));
1978*0Sstevel@tonic-gate 	ASSERT(cyclic->cy_flags & CYF_CPU_BOUND);
1979*0Sstevel@tonic-gate 
1980*0Sstevel@tonic-gate 	cyclic->cy_flags &= ~CYF_CPU_BOUND;
1981*0Sstevel@tonic-gate 
1982*0Sstevel@tonic-gate 	/*
1983*0Sstevel@tonic-gate 	 * If we were bound to a CPU which has interrupts disabled, we need
1984*0Sstevel@tonic-gate 	 * to juggle away.  This can only fail if we are bound to a
1985*0Sstevel@tonic-gate 	 * processor set, and if every CPU in the processor set has
1986*0Sstevel@tonic-gate 	 * interrupts disabled.
1987*0Sstevel@tonic-gate 	 */
1988*0Sstevel@tonic-gate 	if (!(c->cpu_flags & CPU_ENABLE)) {
1989*0Sstevel@tonic-gate 		int res = cyclic_juggle_one(idp);
1990*0Sstevel@tonic-gate 
1991*0Sstevel@tonic-gate 		ASSERT((res && idp->cyi_cpu != cpu) ||
1992*0Sstevel@tonic-gate 		    (!res && (cyclic->cy_flags & CYF_PART_BOUND)));
1993*0Sstevel@tonic-gate 	}
1994*0Sstevel@tonic-gate }
1995*0Sstevel@tonic-gate 
1996*0Sstevel@tonic-gate static void
1997*0Sstevel@tonic-gate cyclic_bind_cpu(cyclic_id_t id, cpu_t *d)
1998*0Sstevel@tonic-gate {
1999*0Sstevel@tonic-gate 	cyc_id_t *idp = (cyc_id_t *)id;
2000*0Sstevel@tonic-gate 	cyc_cpu_t *dest = d->cpu_cyclic, *cpu = idp->cyi_cpu;
2001*0Sstevel@tonic-gate 	cpu_t *c = cpu->cyp_cpu;
2002*0Sstevel@tonic-gate 	cyclic_t *cyclic = &cpu->cyp_cyclics[idp->cyi_ndx];
2003*0Sstevel@tonic-gate 	cpupart_t *part = c->cpu_part;
2004*0Sstevel@tonic-gate 
2005*0Sstevel@tonic-gate 	CYC_PTRACE("bind-cpu", id, dest);
2006*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2007*0Sstevel@tonic-gate 	ASSERT(!(d->cpu_flags & CPU_OFFLINE));
2008*0Sstevel@tonic-gate 	ASSERT(!(c->cpu_flags & CPU_OFFLINE));
2009*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_ONLINE);
2010*0Sstevel@tonic-gate 	ASSERT(dest != NULL);
2011*0Sstevel@tonic-gate 	ASSERT(dest->cyp_state == CYS_ONLINE);
2012*0Sstevel@tonic-gate 	ASSERT(!(cyclic->cy_flags & CYF_FREE));
2013*0Sstevel@tonic-gate 	ASSERT(!(cyclic->cy_flags & CYF_CPU_BOUND));
2014*0Sstevel@tonic-gate 
2015*0Sstevel@tonic-gate 	dest = cyclic_pick_cpu(part, d, NULL, cyclic->cy_flags | CYF_CPU_BOUND);
2016*0Sstevel@tonic-gate 
2017*0Sstevel@tonic-gate 	if (dest != cpu) {
2018*0Sstevel@tonic-gate 		cyclic_juggle_one_to(idp, dest);
2019*0Sstevel@tonic-gate 		cyclic = &dest->cyp_cyclics[idp->cyi_ndx];
2020*0Sstevel@tonic-gate 	}
2021*0Sstevel@tonic-gate 
2022*0Sstevel@tonic-gate 	cyclic->cy_flags |= CYF_CPU_BOUND;
2023*0Sstevel@tonic-gate }
2024*0Sstevel@tonic-gate 
2025*0Sstevel@tonic-gate static void
2026*0Sstevel@tonic-gate cyclic_unbind_cpupart(cyclic_id_t id)
2027*0Sstevel@tonic-gate {
2028*0Sstevel@tonic-gate 	cyc_id_t *idp = (cyc_id_t *)id;
2029*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = idp->cyi_cpu;
2030*0Sstevel@tonic-gate 	cpu_t *c = cpu->cyp_cpu;
2031*0Sstevel@tonic-gate 	cyclic_t *cyc = &cpu->cyp_cyclics[idp->cyi_ndx];
2032*0Sstevel@tonic-gate 
2033*0Sstevel@tonic-gate 	CYC_PTRACE("unbind-part", idp, c->cpu_part);
2034*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2035*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_ONLINE);
2036*0Sstevel@tonic-gate 	ASSERT(!(cyc->cy_flags & CYF_FREE));
2037*0Sstevel@tonic-gate 	ASSERT(cyc->cy_flags & CYF_PART_BOUND);
2038*0Sstevel@tonic-gate 
2039*0Sstevel@tonic-gate 	cyc->cy_flags &= ~CYF_PART_BOUND;
2040*0Sstevel@tonic-gate 
2041*0Sstevel@tonic-gate 	/*
2042*0Sstevel@tonic-gate 	 * If we're on a CPU which has interrupts disabled (and if this cyclic
2043*0Sstevel@tonic-gate 	 * isn't bound to the CPU), we need to juggle away.
2044*0Sstevel@tonic-gate 	 */
2045*0Sstevel@tonic-gate 	if (!(c->cpu_flags & CPU_ENABLE) && !(cyc->cy_flags & CYF_CPU_BOUND)) {
2046*0Sstevel@tonic-gate 		int res = cyclic_juggle_one(idp);
2047*0Sstevel@tonic-gate 
2048*0Sstevel@tonic-gate 		ASSERT(res && idp->cyi_cpu != cpu);
2049*0Sstevel@tonic-gate 	}
2050*0Sstevel@tonic-gate }
2051*0Sstevel@tonic-gate 
2052*0Sstevel@tonic-gate static void
2053*0Sstevel@tonic-gate cyclic_bind_cpupart(cyclic_id_t id, cpupart_t *part)
2054*0Sstevel@tonic-gate {
2055*0Sstevel@tonic-gate 	cyc_id_t *idp = (cyc_id_t *)id;
2056*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = idp->cyi_cpu, *dest;
2057*0Sstevel@tonic-gate 	cpu_t *c = cpu->cyp_cpu;
2058*0Sstevel@tonic-gate 	cyclic_t *cyc = &cpu->cyp_cyclics[idp->cyi_ndx];
2059*0Sstevel@tonic-gate 
2060*0Sstevel@tonic-gate 	CYC_PTRACE("bind-part", idp, part);
2061*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2062*0Sstevel@tonic-gate 	ASSERT(!(c->cpu_flags & CPU_OFFLINE));
2063*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_ONLINE);
2064*0Sstevel@tonic-gate 	ASSERT(!(cyc->cy_flags & CYF_FREE));
2065*0Sstevel@tonic-gate 	ASSERT(!(cyc->cy_flags & CYF_PART_BOUND));
2066*0Sstevel@tonic-gate 	ASSERT(part->cp_ncpus > 0);
2067*0Sstevel@tonic-gate 
2068*0Sstevel@tonic-gate 	dest = cyclic_pick_cpu(part, c, NULL, cyc->cy_flags | CYF_PART_BOUND);
2069*0Sstevel@tonic-gate 
2070*0Sstevel@tonic-gate 	if (dest != cpu) {
2071*0Sstevel@tonic-gate 		cyclic_juggle_one_to(idp, dest);
2072*0Sstevel@tonic-gate 		cyc = &dest->cyp_cyclics[idp->cyi_ndx];
2073*0Sstevel@tonic-gate 	}
2074*0Sstevel@tonic-gate 
2075*0Sstevel@tonic-gate 	cyc->cy_flags |= CYF_PART_BOUND;
2076*0Sstevel@tonic-gate }
2077*0Sstevel@tonic-gate 
2078*0Sstevel@tonic-gate static void
2079*0Sstevel@tonic-gate cyclic_configure(cpu_t *c)
2080*0Sstevel@tonic-gate {
2081*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = kmem_zalloc(sizeof (cyc_cpu_t), KM_SLEEP);
2082*0Sstevel@tonic-gate 	cyc_backend_t *nbe = kmem_zalloc(sizeof (cyc_backend_t), KM_SLEEP);
2083*0Sstevel@tonic-gate 	int i;
2084*0Sstevel@tonic-gate 
2085*0Sstevel@tonic-gate 	CYC_PTRACE1("configure", cpu);
2086*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2087*0Sstevel@tonic-gate 
2088*0Sstevel@tonic-gate 	if (cyclic_id_cache == NULL)
2089*0Sstevel@tonic-gate 		cyclic_id_cache = kmem_cache_create("cyclic_id_cache",
2090*0Sstevel@tonic-gate 		    sizeof (cyc_id_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
2091*0Sstevel@tonic-gate 
2092*0Sstevel@tonic-gate 	cpu->cyp_cpu = c;
2093*0Sstevel@tonic-gate 
2094*0Sstevel@tonic-gate 	sema_init(&cpu->cyp_modify_wait, 0, NULL, SEMA_DEFAULT, NULL);
2095*0Sstevel@tonic-gate 
2096*0Sstevel@tonic-gate 	cpu->cyp_size = 1;
2097*0Sstevel@tonic-gate 	cpu->cyp_heap = kmem_zalloc(sizeof (cyc_index_t), KM_SLEEP);
2098*0Sstevel@tonic-gate 	cpu->cyp_cyclics = kmem_zalloc(sizeof (cyclic_t), KM_SLEEP);
2099*0Sstevel@tonic-gate 	cpu->cyp_cyclics->cy_flags = CYF_FREE;
2100*0Sstevel@tonic-gate 
2101*0Sstevel@tonic-gate 	for (i = CY_LOW_LEVEL; i < CY_LOW_LEVEL + CY_SOFT_LEVELS; i++) {
2102*0Sstevel@tonic-gate 		/*
2103*0Sstevel@tonic-gate 		 * We don't need to set the sizemask; it's already zero
2104*0Sstevel@tonic-gate 		 * (which is the appropriate sizemask for a size of 1).
2105*0Sstevel@tonic-gate 		 */
2106*0Sstevel@tonic-gate 		cpu->cyp_softbuf[i].cys_buf[0].cypc_buf =
2107*0Sstevel@tonic-gate 		    kmem_alloc(sizeof (cyc_index_t), KM_SLEEP);
2108*0Sstevel@tonic-gate 	}
2109*0Sstevel@tonic-gate 
2110*0Sstevel@tonic-gate 	cpu->cyp_state = CYS_OFFLINE;
2111*0Sstevel@tonic-gate 
2112*0Sstevel@tonic-gate 	/*
2113*0Sstevel@tonic-gate 	 * Set up the backend for this CPU.
2114*0Sstevel@tonic-gate 	 */
2115*0Sstevel@tonic-gate 	bcopy(&cyclic_backend, nbe, sizeof (cyc_backend_t));
2116*0Sstevel@tonic-gate 	nbe->cyb_arg = nbe->cyb_configure(c);
2117*0Sstevel@tonic-gate 	cpu->cyp_backend = nbe;
2118*0Sstevel@tonic-gate 
2119*0Sstevel@tonic-gate 	/*
2120*0Sstevel@tonic-gate 	 * On platforms where stray interrupts may be taken during startup,
2121*0Sstevel@tonic-gate 	 * the CPU's cpu_cyclic pointer serves as an indicator that the
2122*0Sstevel@tonic-gate 	 * cyclic subsystem for this CPU is prepared to field interrupts.
2123*0Sstevel@tonic-gate 	 */
2124*0Sstevel@tonic-gate 	membar_producer();
2125*0Sstevel@tonic-gate 
2126*0Sstevel@tonic-gate 	c->cpu_cyclic = cpu;
2127*0Sstevel@tonic-gate }
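
/*
 * The ordering idiom above -- fully initialize a structure, execute a
 * store barrier, and only then publish the pointer to it -- deserves
 * emphasis.  A minimal sketch of the pattern, with hypothetical names
 * (this is not additional subsystem code):
 *
 *	foo_t *foo = kmem_zalloc(sizeof (foo_t), KM_SLEEP);
 *
 *	foo->foo_field = init_value;
 *	membar_producer();
 *	global_foo = foo;
 *
 * Because membar_producer() orders the initializing stores before the
 * store which publishes the pointer, a consumer which loads global_foo
 * (issuing membar_consumer() where the memory model requires it) will see
 * a fully initialized structure, never a partially constructed one.
 */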
2128*0Sstevel@tonic-gate 
2129*0Sstevel@tonic-gate static void
2130*0Sstevel@tonic-gate cyclic_unconfigure(cpu_t *c)
2131*0Sstevel@tonic-gate {
2132*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = c->cpu_cyclic;
2133*0Sstevel@tonic-gate 	cyc_backend_t *be = cpu->cyp_backend;
2134*0Sstevel@tonic-gate 	cyb_arg_t bar = be->cyb_arg;
2135*0Sstevel@tonic-gate 	int i;
2136*0Sstevel@tonic-gate 
2137*0Sstevel@tonic-gate 	CYC_PTRACE1("unconfigure", cpu);
2138*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2139*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_OFFLINE);
2140*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_nelems == 0);
2141*0Sstevel@tonic-gate 
2142*0Sstevel@tonic-gate 	/*
2143*0Sstevel@tonic-gate 	 * Let the backend know that the CPU is being yanked, and free up
2144*0Sstevel@tonic-gate 	 * the backend structure.
2145*0Sstevel@tonic-gate 	 */
2146*0Sstevel@tonic-gate 	be->cyb_unconfigure(bar);
2147*0Sstevel@tonic-gate 	kmem_free(be, sizeof (cyc_backend_t));
2148*0Sstevel@tonic-gate 	cpu->cyp_backend = NULL;
2149*0Sstevel@tonic-gate 
2150*0Sstevel@tonic-gate 	/*
2151*0Sstevel@tonic-gate 	 * Free up the producer/consumer buffers at each of the soft levels.
2152*0Sstevel@tonic-gate 	 */
2153*0Sstevel@tonic-gate 	for (i = CY_LOW_LEVEL; i < CY_LOW_LEVEL + CY_SOFT_LEVELS; i++) {
2154*0Sstevel@tonic-gate 		cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[i];
2155*0Sstevel@tonic-gate 		uchar_t hard = softbuf->cys_hard;
2156*0Sstevel@tonic-gate 		cyc_pcbuffer_t *pc = &softbuf->cys_buf[hard];
2157*0Sstevel@tonic-gate 		size_t bufsize = sizeof (cyc_index_t) * (pc->cypc_sizemask + 1);
2158*0Sstevel@tonic-gate 
2159*0Sstevel@tonic-gate 		/*
2160*0Sstevel@tonic-gate 		 * Assert that we're not in the middle of a resize operation.
2161*0Sstevel@tonic-gate 		 */
2162*0Sstevel@tonic-gate 		ASSERT(hard == softbuf->cys_soft);
2163*0Sstevel@tonic-gate 		ASSERT(hard == 0 || hard == 1);
2164*0Sstevel@tonic-gate 		ASSERT(pc->cypc_buf != NULL);
2165*0Sstevel@tonic-gate 		ASSERT(softbuf->cys_buf[hard ^ 1].cypc_buf == NULL);
2166*0Sstevel@tonic-gate 
2167*0Sstevel@tonic-gate 		kmem_free(pc->cypc_buf, bufsize);
2168*0Sstevel@tonic-gate 		pc->cypc_buf = NULL;
2169*0Sstevel@tonic-gate 	}
2170*0Sstevel@tonic-gate 
2171*0Sstevel@tonic-gate 	/*
2172*0Sstevel@tonic-gate 	 * Finally, clean up our remaining dynamic structures and NULL out
2173*0Sstevel@tonic-gate 	 * the cpu_cyclic pointer.
2174*0Sstevel@tonic-gate 	 */
2175*0Sstevel@tonic-gate 	kmem_free(cpu->cyp_cyclics, cpu->cyp_size * sizeof (cyclic_t));
2176*0Sstevel@tonic-gate 	kmem_free(cpu->cyp_heap, cpu->cyp_size * sizeof (cyc_index_t));
2177*0Sstevel@tonic-gate 	kmem_free(cpu, sizeof (cyc_cpu_t));
2178*0Sstevel@tonic-gate 
2179*0Sstevel@tonic-gate 	c->cpu_cyclic = NULL;
2180*0Sstevel@tonic-gate }
2181*0Sstevel@tonic-gate 
2182*0Sstevel@tonic-gate static int
2183*0Sstevel@tonic-gate cyclic_cpu_setup(cpu_setup_t what, int id)
2184*0Sstevel@tonic-gate {
2185*0Sstevel@tonic-gate 	/*
2186*0Sstevel@tonic-gate 	 * We are guaranteed that there is still/already an entry in the
2187*0Sstevel@tonic-gate 	 * cpu array for this CPU.
2188*0Sstevel@tonic-gate 	 */
2189*0Sstevel@tonic-gate 	cpu_t *c = cpu[id];
2190*0Sstevel@tonic-gate 	cyc_cpu_t *cyp = c->cpu_cyclic;
2191*0Sstevel@tonic-gate 
2192*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2193*0Sstevel@tonic-gate 
2194*0Sstevel@tonic-gate 	switch (what) {
2195*0Sstevel@tonic-gate 	case CPU_CONFIG:
2196*0Sstevel@tonic-gate 		ASSERT(cyp == NULL);
2197*0Sstevel@tonic-gate 		cyclic_configure(c);
2198*0Sstevel@tonic-gate 		break;
2199*0Sstevel@tonic-gate 
2200*0Sstevel@tonic-gate 	case CPU_UNCONFIG:
2201*0Sstevel@tonic-gate 		ASSERT(cyp != NULL && cyp->cyp_state == CYS_OFFLINE);
2202*0Sstevel@tonic-gate 		cyclic_unconfigure(c);
2203*0Sstevel@tonic-gate 		break;
2204*0Sstevel@tonic-gate 
2205*0Sstevel@tonic-gate 	default:
2206*0Sstevel@tonic-gate 		break;
2207*0Sstevel@tonic-gate 	}
2208*0Sstevel@tonic-gate 
2209*0Sstevel@tonic-gate 	return (0);
2210*0Sstevel@tonic-gate }
2211*0Sstevel@tonic-gate 
2212*0Sstevel@tonic-gate static void
2213*0Sstevel@tonic-gate cyclic_suspend_xcall(cyc_xcallarg_t *arg)
2214*0Sstevel@tonic-gate {
2215*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = arg->cyx_cpu;
2216*0Sstevel@tonic-gate 	cyc_backend_t *be = cpu->cyp_backend;
2217*0Sstevel@tonic-gate 	cyc_cookie_t cookie;
2218*0Sstevel@tonic-gate 	cyb_arg_t bar = be->cyb_arg;
2219*0Sstevel@tonic-gate 
2220*0Sstevel@tonic-gate 	cookie = be->cyb_set_level(bar, CY_HIGH_LEVEL);
2221*0Sstevel@tonic-gate 
2222*0Sstevel@tonic-gate 	CYC_TRACE1(cpu, CY_HIGH_LEVEL, "suspend-xcall", cpu->cyp_nelems);
2223*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_ONLINE || cpu->cyp_state == CYS_OFFLINE);
2224*0Sstevel@tonic-gate 
2225*0Sstevel@tonic-gate 	/*
2226*0Sstevel@tonic-gate 	 * We won't disable this CPU unless it has a non-zero number of
2227*0Sstevel@tonic-gate 	 * elements (cpu_lock assures that no one else may be attempting
2228*0Sstevel@tonic-gate 	 * to disable this CPU).
2229*0Sstevel@tonic-gate 	 */
2230*0Sstevel@tonic-gate 	if (cpu->cyp_nelems > 0) {
2231*0Sstevel@tonic-gate 		ASSERT(cpu->cyp_state == CYS_ONLINE);
2232*0Sstevel@tonic-gate 		be->cyb_disable(bar);
2233*0Sstevel@tonic-gate 	}
2234*0Sstevel@tonic-gate 
2235*0Sstevel@tonic-gate 	if (cpu->cyp_state == CYS_ONLINE)
2236*0Sstevel@tonic-gate 		cpu->cyp_state = CYS_SUSPENDED;
2237*0Sstevel@tonic-gate 
2238*0Sstevel@tonic-gate 	be->cyb_suspend(bar);
2239*0Sstevel@tonic-gate 	be->cyb_restore_level(bar, cookie);
2240*0Sstevel@tonic-gate }
2241*0Sstevel@tonic-gate 
2242*0Sstevel@tonic-gate static void
2243*0Sstevel@tonic-gate cyclic_resume_xcall(cyc_xcallarg_t *arg)
2244*0Sstevel@tonic-gate {
2245*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = arg->cyx_cpu;
2246*0Sstevel@tonic-gate 	cyc_backend_t *be = cpu->cyp_backend;
2247*0Sstevel@tonic-gate 	cyc_cookie_t cookie;
2248*0Sstevel@tonic-gate 	cyb_arg_t bar = be->cyb_arg;
2249*0Sstevel@tonic-gate 	cyc_state_t state = cpu->cyp_state;
2250*0Sstevel@tonic-gate 
2251*0Sstevel@tonic-gate 	cookie = be->cyb_set_level(bar, CY_HIGH_LEVEL);
2252*0Sstevel@tonic-gate 
2253*0Sstevel@tonic-gate 	CYC_TRACE1(cpu, CY_HIGH_LEVEL, "resume-xcall", cpu->cyp_nelems);
2254*0Sstevel@tonic-gate 	ASSERT(state == CYS_SUSPENDED || state == CYS_OFFLINE);
2255*0Sstevel@tonic-gate 
2256*0Sstevel@tonic-gate 	be->cyb_resume(bar);
2257*0Sstevel@tonic-gate 
2258*0Sstevel@tonic-gate 	/*
2259*0Sstevel@tonic-gate 	 * We won't enable this CPU unless it has a non-zero number of
2260*0Sstevel@tonic-gate 	 * elements.
2261*0Sstevel@tonic-gate 	 */
2262*0Sstevel@tonic-gate 	if (cpu->cyp_nelems > 0) {
2263*0Sstevel@tonic-gate 		cyclic_t *cyclic = &cpu->cyp_cyclics[cpu->cyp_heap[0]];
2264*0Sstevel@tonic-gate 		hrtime_t exp = cyclic->cy_expire;
2265*0Sstevel@tonic-gate 
2266*0Sstevel@tonic-gate 		CYC_TRACE(cpu, CY_HIGH_LEVEL, "resume-reprog", cyclic, exp);
2267*0Sstevel@tonic-gate 		ASSERT(state == CYS_SUSPENDED);
2268*0Sstevel@tonic-gate 		be->cyb_enable(bar);
2269*0Sstevel@tonic-gate 		be->cyb_reprogram(bar, exp);
2270*0Sstevel@tonic-gate 	}
2271*0Sstevel@tonic-gate 
2272*0Sstevel@tonic-gate 	if (state == CYS_SUSPENDED)
2273*0Sstevel@tonic-gate 		cpu->cyp_state = CYS_ONLINE;
2274*0Sstevel@tonic-gate 
2275*0Sstevel@tonic-gate 	CYC_TRACE1(cpu, CY_HIGH_LEVEL, "resume-done", cpu->cyp_nelems);
2276*0Sstevel@tonic-gate 	be->cyb_restore_level(bar, cookie);
2277*0Sstevel@tonic-gate }
2278*0Sstevel@tonic-gate 
2279*0Sstevel@tonic-gate static void
2280*0Sstevel@tonic-gate cyclic_omni_start(cyc_id_t *idp, cyc_cpu_t *cpu)
2281*0Sstevel@tonic-gate {
2282*0Sstevel@tonic-gate 	cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
2283*0Sstevel@tonic-gate 	cyc_omni_cpu_t *ocpu = kmem_alloc(sizeof (cyc_omni_cpu_t), KM_SLEEP);
2284*0Sstevel@tonic-gate 	cyc_handler_t hdlr;
2285*0Sstevel@tonic-gate 	cyc_time_t when;
2286*0Sstevel@tonic-gate 
2287*0Sstevel@tonic-gate 	CYC_PTRACE("omni-start", cpu, idp);
2288*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2289*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_ONLINE);
2290*0Sstevel@tonic-gate 	ASSERT(idp->cyi_cpu == NULL);
2291*0Sstevel@tonic-gate 
2292*0Sstevel@tonic-gate 	hdlr.cyh_func = NULL;
2293*0Sstevel@tonic-gate 	hdlr.cyh_arg = NULL;
2294*0Sstevel@tonic-gate 	hdlr.cyh_level = CY_LEVELS;
2295*0Sstevel@tonic-gate 
2296*0Sstevel@tonic-gate 	when.cyt_when = 0;
2297*0Sstevel@tonic-gate 	when.cyt_interval = 0;
2298*0Sstevel@tonic-gate 
2299*0Sstevel@tonic-gate 	omni->cyo_online(omni->cyo_arg, cpu->cyp_cpu, &hdlr, &when);
2300*0Sstevel@tonic-gate 
2301*0Sstevel@tonic-gate 	ASSERT(hdlr.cyh_func != NULL);
2302*0Sstevel@tonic-gate 	ASSERT(hdlr.cyh_level < CY_LEVELS);
2303*0Sstevel@tonic-gate 	ASSERT(when.cyt_when >= 0 && when.cyt_interval > 0);
2304*0Sstevel@tonic-gate 
2305*0Sstevel@tonic-gate 	ocpu->cyo_cpu = cpu;
2306*0Sstevel@tonic-gate 	ocpu->cyo_arg = hdlr.cyh_arg;
2307*0Sstevel@tonic-gate 	ocpu->cyo_ndx = cyclic_add_here(cpu, &hdlr, &when, 0);
2308*0Sstevel@tonic-gate 	ocpu->cyo_next = idp->cyi_omni_list;
2309*0Sstevel@tonic-gate 	idp->cyi_omni_list = ocpu;
2310*0Sstevel@tonic-gate }
2311*0Sstevel@tonic-gate 
2312*0Sstevel@tonic-gate static void
2313*0Sstevel@tonic-gate cyclic_omni_stop(cyc_id_t *idp, cyc_cpu_t *cpu)
2314*0Sstevel@tonic-gate {
2315*0Sstevel@tonic-gate 	cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
2316*0Sstevel@tonic-gate 	cyc_omni_cpu_t *ocpu = idp->cyi_omni_list, *prev = NULL;
2317*0Sstevel@tonic-gate 
2318*0Sstevel@tonic-gate 	CYC_PTRACE("omni-stop", cpu, idp);
2319*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2320*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_ONLINE);
2321*0Sstevel@tonic-gate 	ASSERT(idp->cyi_cpu == NULL);
2322*0Sstevel@tonic-gate 	ASSERT(ocpu != NULL);
2323*0Sstevel@tonic-gate 
2324*0Sstevel@tonic-gate 	while (ocpu != NULL && ocpu->cyo_cpu != cpu) {
2325*0Sstevel@tonic-gate 		prev = ocpu;
2326*0Sstevel@tonic-gate 		ocpu = ocpu->cyo_next;
2327*0Sstevel@tonic-gate 	}
2328*0Sstevel@tonic-gate 
2329*0Sstevel@tonic-gate 	/*
2330*0Sstevel@tonic-gate 	 * We _must_ have found a cyc_omni_cpu which corresponds to this
2331*0Sstevel@tonic-gate 	 * CPU -- the definition of an omnipresent cyclic is that it runs
2332*0Sstevel@tonic-gate 	 * on all online CPUs.
2333*0Sstevel@tonic-gate 	 */
2334*0Sstevel@tonic-gate 	ASSERT(ocpu != NULL);
2335*0Sstevel@tonic-gate 
2336*0Sstevel@tonic-gate 	if (prev == NULL) {
2337*0Sstevel@tonic-gate 		idp->cyi_omni_list = ocpu->cyo_next;
2338*0Sstevel@tonic-gate 	} else {
2339*0Sstevel@tonic-gate 		prev->cyo_next = ocpu->cyo_next;
2340*0Sstevel@tonic-gate 	}
2341*0Sstevel@tonic-gate 
2342*0Sstevel@tonic-gate 	(void) cyclic_remove_here(ocpu->cyo_cpu, ocpu->cyo_ndx, NULL, CY_WAIT);
2343*0Sstevel@tonic-gate 
2344*0Sstevel@tonic-gate 	/*
2345*0Sstevel@tonic-gate 	 * The cyclic has been removed from this CPU; time to call the
2346*0Sstevel@tonic-gate 	 * omnipresent offline handler.
2347*0Sstevel@tonic-gate 	 */
2348*0Sstevel@tonic-gate 	if (omni->cyo_offline != NULL)
2349*0Sstevel@tonic-gate 		omni->cyo_offline(omni->cyo_arg, cpu->cyp_cpu, ocpu->cyo_arg);
2350*0Sstevel@tonic-gate 
2351*0Sstevel@tonic-gate 	kmem_free(ocpu, sizeof (cyc_omni_cpu_t));
2352*0Sstevel@tonic-gate }
2353*0Sstevel@tonic-gate 
2354*0Sstevel@tonic-gate static cyc_id_t *
2355*0Sstevel@tonic-gate cyclic_new_id()
2356*0Sstevel@tonic-gate {
2357*0Sstevel@tonic-gate 	cyc_id_t *idp;
2358*0Sstevel@tonic-gate 
2359*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2360*0Sstevel@tonic-gate 
2361*0Sstevel@tonic-gate 	idp = kmem_cache_alloc(cyclic_id_cache, KM_SLEEP);
2362*0Sstevel@tonic-gate 
2363*0Sstevel@tonic-gate 	/*
2364*0Sstevel@tonic-gate 	 * The cyi_cpu field of the cyc_id_t structure tracks the CPU
2365*0Sstevel@tonic-gate 	 * associated with the cyclic.  If and only if this field is NULL, the
2366*0Sstevel@tonic-gate 	 * cyc_id_t is an omnipresent cyclic.  Note that cyi_omni_list may be
2367*0Sstevel@tonic-gate 	 * NULL for an omnipresent cyclic while the cyclic is being created
2368*0Sstevel@tonic-gate 	 * or destroyed.
2369*0Sstevel@tonic-gate 	 */
2370*0Sstevel@tonic-gate 	idp->cyi_cpu = NULL;
2371*0Sstevel@tonic-gate 	idp->cyi_ndx = 0;
2372*0Sstevel@tonic-gate 
2373*0Sstevel@tonic-gate 	idp->cyi_next = cyclic_id_head;
2374*0Sstevel@tonic-gate 	idp->cyi_prev = NULL;
2375*0Sstevel@tonic-gate 	idp->cyi_omni_list = NULL;
2376*0Sstevel@tonic-gate 
2377*0Sstevel@tonic-gate 	if (cyclic_id_head != NULL) {
2378*0Sstevel@tonic-gate 		ASSERT(cyclic_id_head->cyi_prev == NULL);
2379*0Sstevel@tonic-gate 		cyclic_id_head->cyi_prev = idp;
2380*0Sstevel@tonic-gate 	}
2381*0Sstevel@tonic-gate 
2382*0Sstevel@tonic-gate 	cyclic_id_head = idp;
2383*0Sstevel@tonic-gate 
2384*0Sstevel@tonic-gate 	return (idp);
2385*0Sstevel@tonic-gate }
2386*0Sstevel@tonic-gate 
2387*0Sstevel@tonic-gate /*
2388*0Sstevel@tonic-gate  *  cyclic_id_t cyclic_add(cyc_handler_t *, cyc_time_t *)
2389*0Sstevel@tonic-gate  *
2390*0Sstevel@tonic-gate  *  Overview
2391*0Sstevel@tonic-gate  *
2392*0Sstevel@tonic-gate  *    cyclic_add() will create an unbound cyclic with the specified handler and
2393*0Sstevel@tonic-gate  *    interval.  The cyclic will run on a CPU which both has interrupts enabled
2394*0Sstevel@tonic-gate  *    and is in the system CPU partition.
2395*0Sstevel@tonic-gate  *
2396*0Sstevel@tonic-gate  *  Arguments and notes
2397*0Sstevel@tonic-gate  *
2398*0Sstevel@tonic-gate  *    As its first argument, cyclic_add() takes a cyc_handler, which has the
2399*0Sstevel@tonic-gate  *    following members:
2400*0Sstevel@tonic-gate  *
2401*0Sstevel@tonic-gate  *      cyc_func_t cyh_func    <-- Cyclic handler
2402*0Sstevel@tonic-gate  *      void *cyh_arg          <-- Argument to cyclic handler
2403*0Sstevel@tonic-gate  *      cyc_level_t cyh_level  <-- Level at which to fire; must be one of
2404*0Sstevel@tonic-gate  *                                 CY_LOW_LEVEL, CY_LOCK_LEVEL or CY_HIGH_LEVEL
2405*0Sstevel@tonic-gate  *
2406*0Sstevel@tonic-gate  *    Note that cyh_level is _not_ an ipl or spl; it must be one of the
2407*0Sstevel@tonic-gate  *    CY_*_LEVELs.  This layer of abstraction allows the platform to define
2408*0Sstevel@tonic-gate  *    the precise interrupt priority levels, within the following constraints:
2409*0Sstevel@tonic-gate  *
2410*0Sstevel@tonic-gate  *       CY_LOCK_LEVEL must map to LOCK_LEVEL
2411*0Sstevel@tonic-gate  *       CY_HIGH_LEVEL must map to an ipl greater than LOCK_LEVEL
2412*0Sstevel@tonic-gate  *       CY_LOW_LEVEL must map to an ipl below LOCK_LEVEL
2413*0Sstevel@tonic-gate  *
2414*0Sstevel@tonic-gate  *    In addition to a cyc_handler, cyclic_add() takes a cyc_time, which
2415*0Sstevel@tonic-gate  *    has the following members:
2416*0Sstevel@tonic-gate  *
2417*0Sstevel@tonic-gate  *       hrtime_t cyt_when     <-- Absolute time, in nanoseconds since boot, at
2418*0Sstevel@tonic-gate  *                                 which to start firing
2419*0Sstevel@tonic-gate  *       hrtime_t cyt_interval <-- Length of interval, in nanoseconds
2420*0Sstevel@tonic-gate  *
2421*0Sstevel@tonic-gate  *    gethrtime() is the time source for nanoseconds since boot.  If cyt_when
2422*0Sstevel@tonic-gate  *    is set to 0, the cyclic will start to fire when cyt_interval next
2423*0Sstevel@tonic-gate  *    divides the number of nanoseconds since boot.
2424*0Sstevel@tonic-gate  *
2425*0Sstevel@tonic-gate  *    The cyt_interval field _must_ be filled in by the caller; one-shots are
2426*0Sstevel@tonic-gate  *    _not_ explicitly supported by the cyclic subsystem (cyclic_add() will
2427*0Sstevel@tonic-gate  *    assert that cyt_interval is non-zero).  The maximum value for either
2428*0Sstevel@tonic-gate  *    field is INT64_MAX; the caller is responsible for assuring that
2429*0Sstevel@tonic-gate  *    cyt_when + cyt_interval <= INT64_MAX.  Neither field may be negative.
2430*0Sstevel@tonic-gate  *
2431*0Sstevel@tonic-gate  *    For an arbitrary time t in the future, the cyclic handler is guaranteed
2432*0Sstevel@tonic-gate  *    to have been called (t - cyt_when) / cyt_interval times.  This will
2433*0Sstevel@tonic-gate  *    be true even if interrupts have been disabled for periods greater than
2434*0Sstevel@tonic-gate  *    cyt_interval nanoseconds.  In order to compensate for such periods,
2435*0Sstevel@tonic-gate  *    the cyclic handler may be called a finite number of times with an
2436*0Sstevel@tonic-gate  *    arbitrarily small interval.
2437*0Sstevel@tonic-gate  *
2438*0Sstevel@tonic-gate  *    The cyclic subsystem will not enforce any lower bound on the interval;
2439*0Sstevel@tonic-gate  *    if the interval is less than the time required to process an interrupt,
2440*0Sstevel@tonic-gate  *    the CPU will wedge.  It's the responsibility of the caller to assure that
2441*0Sstevel@tonic-gate  *    either the value of the interval is sane, or that its caller has
2442*0Sstevel@tonic-gate  *    sufficient privilege to deny service (i.e. its caller is root).
2443*0Sstevel@tonic-gate  *
2444*0Sstevel@tonic-gate  *    The cyclic handler is guaranteed to be single threaded, even while the
2445*0Sstevel@tonic-gate  *    cyclic is being juggled between CPUs (see cyclic_juggle(), below).
2446*0Sstevel@tonic-gate  *    That is, a given cyclic handler will never be executed simultaneously
2447*0Sstevel@tonic-gate  *    on different CPUs.
2448*0Sstevel@tonic-gate  *
2449*0Sstevel@tonic-gate  *  Return value
2450*0Sstevel@tonic-gate  *
2451*0Sstevel@tonic-gate  *    cyclic_add() returns a cyclic_id_t, which is guaranteed to be a value
2452*0Sstevel@tonic-gate  *    other than CYCLIC_NONE.  cyclic_add() cannot fail.
2453*0Sstevel@tonic-gate  *
2454*0Sstevel@tonic-gate  *  Caller's context
2455*0Sstevel@tonic-gate  *
2456*0Sstevel@tonic-gate  *    cpu_lock must be held by the caller, and the caller must not be in
2457*0Sstevel@tonic-gate  *    interrupt context.  cyclic_add() will perform a KM_SLEEP kernel
2458*0Sstevel@tonic-gate  *    memory allocation, so the usual rules (e.g. p_lock cannot be held)
2459*0Sstevel@tonic-gate  *    apply.  A cyclic may be added even in the presence of CPUs that have
2460*0Sstevel@tonic-gate  *    not been configured with respect to the cyclic subsystem, but only
2461*0Sstevel@tonic-gate  *    configured CPUs will be eligible to run the new cyclic.
2462*0Sstevel@tonic-gate  *
2463*0Sstevel@tonic-gate  *  Cyclic handler's context
2464*0Sstevel@tonic-gate  *
2465*0Sstevel@tonic-gate  *    Cyclic handlers will be executed in the interrupt context corresponding
2466*0Sstevel@tonic-gate  *    to the specified level (i.e. either high, lock or low level).  The
2467*0Sstevel@tonic-gate  *    usual context rules apply.
2468*0Sstevel@tonic-gate  *
2469*0Sstevel@tonic-gate  *    A cyclic handler may not grab ANY locks held by the caller of any of
2470*0Sstevel@tonic-gate  *    cyclic_add(), cyclic_remove() or cyclic_bind(); the implementation of
2471*0Sstevel@tonic-gate  *    these functions may require blocking on cyclic handler completion.
2472*0Sstevel@tonic-gate  *    Moreover, cyclic handlers may not make any call back into the cyclic
2473*0Sstevel@tonic-gate  *    subsystem.
2474*0Sstevel@tonic-gate  */
2475*0Sstevel@tonic-gate cyclic_id_t
2476*0Sstevel@tonic-gate cyclic_add(cyc_handler_t *hdlr, cyc_time_t *when)
2477*0Sstevel@tonic-gate {
2478*0Sstevel@tonic-gate 	cyc_id_t *idp = cyclic_new_id();
2479*0Sstevel@tonic-gate 
2480*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2481*0Sstevel@tonic-gate 	ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);
2482*0Sstevel@tonic-gate 
2483*0Sstevel@tonic-gate 	idp->cyi_cpu = cyclic_pick_cpu(NULL, NULL, NULL, 0);
2484*0Sstevel@tonic-gate 	idp->cyi_ndx = cyclic_add_here(idp->cyi_cpu, hdlr, when, 0);
2485*0Sstevel@tonic-gate 
2486*0Sstevel@tonic-gate 	return ((uintptr_t)idp);
2487*0Sstevel@tonic-gate }
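
/*
 * For illustration, a minimal sketch of adding a cyclic which fires a
 * hypothetical handler, my_handler(), every ten milliseconds at low level
 * (my_arg is likewise hypothetical):
 *
 *	static void
 *	my_handler(void *arg)
 *	{
 *		...
 *	}
 *
 *	cyc_handler_t hdlr;
 *	cyc_time_t when;
 *	cyclic_id_t id;
 *
 *	hdlr.cyh_func = my_handler;
 *	hdlr.cyh_arg = my_arg;
 *	hdlr.cyh_level = CY_LOW_LEVEL;
 *
 *	when.cyt_when = 0;
 *	when.cyt_interval = 10 * (NANOSEC / MILLISEC);
 *
 *	mutex_enter(&cpu_lock);
 *	id = cyclic_add(&hdlr, &when);
 *	mutex_exit(&cpu_lock);
 *
 * Because cyt_when is zero, the cyclic first fires when the interval next
 * divides the number of nanoseconds since boot, per the rules above.
 */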
2488*0Sstevel@tonic-gate 
2489*0Sstevel@tonic-gate /*
2490*0Sstevel@tonic-gate  *  cyclic_id_t cyclic_add_omni(cyc_omni_handler_t *)
2491*0Sstevel@tonic-gate  *
2492*0Sstevel@tonic-gate  *  Overview
2493*0Sstevel@tonic-gate  *
2494*0Sstevel@tonic-gate  *    cyclic_add_omni() will create an omnipresent cyclic with the specified
2495*0Sstevel@tonic-gate  *    online and offline handlers.  Omnipresent cyclics run on all online
2496*0Sstevel@tonic-gate  *    CPUs, including CPUs which have unbound interrupts disabled.
2497*0Sstevel@tonic-gate  *
2498*0Sstevel@tonic-gate  *  Arguments
2499*0Sstevel@tonic-gate  *
2500*0Sstevel@tonic-gate  *    As its only argument, cyclic_add_omni() takes a cyc_omni_handler, which
2501*0Sstevel@tonic-gate  *    has the following members:
2502*0Sstevel@tonic-gate  *
2503*0Sstevel@tonic-gate  *      void (*cyo_online)()   <-- Online handler
2504*0Sstevel@tonic-gate  *      void (*cyo_offline)()  <-- Offline handler
2505*0Sstevel@tonic-gate  *      void *cyo_arg          <-- Argument to be passed to on/offline handlers
2506*0Sstevel@tonic-gate  *
2507*0Sstevel@tonic-gate  *  Online handler
2508*0Sstevel@tonic-gate  *
2509*0Sstevel@tonic-gate  *    The cyo_online member is a pointer to a function which has the following
2510*0Sstevel@tonic-gate  *    four arguments:
2511*0Sstevel@tonic-gate  *
2512*0Sstevel@tonic-gate  *      void *                 <-- Argument (cyo_arg)
2513*0Sstevel@tonic-gate  *      cpu_t *                <-- Pointer to CPU about to be onlined
2514*0Sstevel@tonic-gate  *      cyc_handler_t *        <-- Pointer to cyc_handler_t; must be filled in
2515*0Sstevel@tonic-gate  *                                 by omni online handler
2516*0Sstevel@tonic-gate  *      cyc_time_t *           <-- Pointer to cyc_time_t; must be filled in by
2517*0Sstevel@tonic-gate  *                                 omni online handler
2518*0Sstevel@tonic-gate  *
2519*0Sstevel@tonic-gate  *    The omni cyclic online handler is always called _before_ the omni
2520*0Sstevel@tonic-gate  *    cyclic begins to fire on the specified CPU.  As the above argument
2521*0Sstevel@tonic-gate  *    description implies, the online handler must fill in the two structures
2522*0Sstevel@tonic-gate  *    passed to it:  the cyc_handler_t and the cyc_time_t.  These are the
2523*0Sstevel@tonic-gate  *    same two structures passed to cyclic_add(), outlined above.  This
2524*0Sstevel@tonic-gate  *    allows the omni cyclic to have maximum flexibility; different CPUs may
2525*0Sstevel@tonic-gate  *    optionally
2526*0Sstevel@tonic-gate  *
2527*0Sstevel@tonic-gate  *      (a)  have different intervals
2528*0Sstevel@tonic-gate  *      (b)  be explicitly in or out of phase with one another
2529*0Sstevel@tonic-gate  *      (c)  have different handlers
2530*0Sstevel@tonic-gate  *      (d)  have different handler arguments
2531*0Sstevel@tonic-gate  *      (e)  fire at different levels
2532*0Sstevel@tonic-gate  *
2533*0Sstevel@tonic-gate  *    Of these, (e) seems somewhat dubious, but is nonetheless allowed.
2534*0Sstevel@tonic-gate  *
2535*0Sstevel@tonic-gate  *    The omni online handler is called in the same context as cyclic_add(),
2536*0Sstevel@tonic-gate  *    and has the same liberties:  omni online handlers may perform KM_SLEEP
2537*0Sstevel@tonic-gate  *    kernel memory allocations, and may grab locks which are also acquired
2538*0Sstevel@tonic-gate  *    by cyclic handlers.  However, omni cyclic online handlers may _not_
2539*0Sstevel@tonic-gate  *    call back into the cyclic subsystem, and should be generally careful
2540*0Sstevel@tonic-gate  *    about calling into arbitrary kernel subsystems.
2541*0Sstevel@tonic-gate  *
2542*0Sstevel@tonic-gate  *  Offline handler
2543*0Sstevel@tonic-gate  *
2544*0Sstevel@tonic-gate  *    The cyo_offline member is a pointer to a function which has the following
2545*0Sstevel@tonic-gate  *    three arguments:
2546*0Sstevel@tonic-gate  *
2547*0Sstevel@tonic-gate  *      void *                 <-- Argument (cyo_arg)
2548*0Sstevel@tonic-gate  *      cpu_t *                <-- Pointer to CPU about to be offlined
2549*0Sstevel@tonic-gate  *      void *                 <-- CPU's cyclic argument (that is, value
2550*0Sstevel@tonic-gate  *                                 to which cyh_arg member of the cyc_handler_t
2551*0Sstevel@tonic-gate  *                                 was set in the omni online handler)
2552*0Sstevel@tonic-gate  *
2553*0Sstevel@tonic-gate  *    The omni cyclic offline handler is always called _after_ the omni
2554*0Sstevel@tonic-gate  *    cyclic has ceased firing on the specified CPU.  Its purpose is to
2555*0Sstevel@tonic-gate  *    allow cleanup of any resources dynamically allocated in the omni cyclic
2556*0Sstevel@tonic-gate  *    online handler.  The context of the offline handler is identical to
2557*0Sstevel@tonic-gate  *    that of the online handler; the same constraints and liberties apply.
2558*0Sstevel@tonic-gate  *
2559*0Sstevel@tonic-gate  *    The offline handler is optional; it may be NULL.
2560*0Sstevel@tonic-gate  *
2561*0Sstevel@tonic-gate  *  Return value
2562*0Sstevel@tonic-gate  *
2563*0Sstevel@tonic-gate  *    cyclic_add_omni() returns a cyclic_id_t, which is guaranteed to be a
2564*0Sstevel@tonic-gate  *    value other than CYCLIC_NONE.  cyclic_add_omni() cannot fail.
2565*0Sstevel@tonic-gate  *
2566*0Sstevel@tonic-gate  *  Caller's context
2567*0Sstevel@tonic-gate  *
2568*0Sstevel@tonic-gate  *    The caller's context is identical to that of cyclic_add(), specified
2569*0Sstevel@tonic-gate  *    above.
2570*0Sstevel@tonic-gate  */
2571*0Sstevel@tonic-gate cyclic_id_t
2572*0Sstevel@tonic-gate cyclic_add_omni(cyc_omni_handler_t *omni)
2573*0Sstevel@tonic-gate {
2574*0Sstevel@tonic-gate 	cyc_id_t *idp = cyclic_new_id();
2575*0Sstevel@tonic-gate 	cyc_cpu_t *cpu;
2576*0Sstevel@tonic-gate 	cpu_t *c;
2577*0Sstevel@tonic-gate 
2578*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2579*0Sstevel@tonic-gate 	ASSERT(omni != NULL && omni->cyo_online != NULL);
2580*0Sstevel@tonic-gate 
2581*0Sstevel@tonic-gate 	idp->cyi_omni_hdlr = *omni;
2582*0Sstevel@tonic-gate 
2583*0Sstevel@tonic-gate 	c = cpu_list;
2584*0Sstevel@tonic-gate 	do {
2585*0Sstevel@tonic-gate 		if ((cpu = c->cpu_cyclic) == NULL)
2586*0Sstevel@tonic-gate 			continue;
2587*0Sstevel@tonic-gate 
2588*0Sstevel@tonic-gate 		if (cpu->cyp_state != CYS_ONLINE) {
2589*0Sstevel@tonic-gate 			ASSERT(cpu->cyp_state == CYS_OFFLINE);
2590*0Sstevel@tonic-gate 			continue;
2591*0Sstevel@tonic-gate 		}
2592*0Sstevel@tonic-gate 
2593*0Sstevel@tonic-gate 		cyclic_omni_start(idp, cpu);
2594*0Sstevel@tonic-gate 	} while ((c = c->cpu_next) != cpu_list);
2595*0Sstevel@tonic-gate 
2596*0Sstevel@tonic-gate 	/*
2597*0Sstevel@tonic-gate 	 * We must have found at least one online CPU on which to run
2598*0Sstevel@tonic-gate 	 * this cyclic.
2599*0Sstevel@tonic-gate 	 */
2600*0Sstevel@tonic-gate 	ASSERT(idp->cyi_omni_list != NULL);
2601*0Sstevel@tonic-gate 	ASSERT(idp->cyi_cpu == NULL);
2602*0Sstevel@tonic-gate 
2603*0Sstevel@tonic-gate 	return ((uintptr_t)idp);
2604*0Sstevel@tonic-gate }
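
/*
 * For illustration, a sketch of an omnipresent cyclic; my_online(),
 * my_offline(), my_percpu_handler() and my_percpu_t are all hypothetical.
 * The online handler allocates per-CPU state and fills in the cyc_handler_t
 * and cyc_time_t (here, firing once per second on each CPU); the offline
 * handler frees that state:
 *
 *	static void
 *	my_online(void *arg, cpu_t *c, cyc_handler_t *hdlr, cyc_time_t *when)
 *	{
 *		my_percpu_t *pc = kmem_zalloc(sizeof (my_percpu_t), KM_SLEEP);
 *
 *		hdlr->cyh_func = my_percpu_handler;
 *		hdlr->cyh_arg = pc;
 *		hdlr->cyh_level = CY_LOW_LEVEL;
 *
 *		when->cyt_when = 0;
 *		when->cyt_interval = NANOSEC;
 *	}
 *
 *	static void
 *	my_offline(void *arg, cpu_t *c, void *pc)
 *	{
 *		kmem_free(pc, sizeof (my_percpu_t));
 *	}
 *
 *	cyc_omni_handler_t omni;
 *	cyclic_id_t id;
 *
 *	omni.cyo_online = my_online;
 *	omni.cyo_offline = my_offline;
 *	omni.cyo_arg = NULL;
 *
 *	mutex_enter(&cpu_lock);
 *	id = cyclic_add_omni(&omni);
 *	mutex_exit(&cpu_lock);
 */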
2605*0Sstevel@tonic-gate 
2606*0Sstevel@tonic-gate /*
2607*0Sstevel@tonic-gate  *  void cyclic_remove(cyclic_id_t)
2608*0Sstevel@tonic-gate  *
2609*0Sstevel@tonic-gate  *  Overview
2610*0Sstevel@tonic-gate  *
2611*0Sstevel@tonic-gate  *    cyclic_remove() will remove the specified cyclic from the system.
2612*0Sstevel@tonic-gate  *
2613*0Sstevel@tonic-gate  *  Arguments and notes
2614*0Sstevel@tonic-gate  *
2615*0Sstevel@tonic-gate  *    The only argument is a cyclic_id returned from either cyclic_add() or
2616*0Sstevel@tonic-gate  *    cyclic_add_omni().
2617*0Sstevel@tonic-gate  *
2618*0Sstevel@tonic-gate  *    By the time cyclic_remove() returns, the caller is guaranteed that the
2619*0Sstevel@tonic-gate  *    removed cyclic handler has completed execution (this is the same
2620*0Sstevel@tonic-gate  *    semantic that untimeout() provides).  As a result, cyclic_remove() may
2621*0Sstevel@tonic-gate  *    need to block, waiting for the removed cyclic to complete execution.
2622*0Sstevel@tonic-gate  *    This leads to an important constraint on the caller:  no lock may be
2623*0Sstevel@tonic-gate  *    held across cyclic_remove() that also may be acquired by a cyclic
2624*0Sstevel@tonic-gate  *    handler.
2625*0Sstevel@tonic-gate  *
2626*0Sstevel@tonic-gate  *  Return value
2627*0Sstevel@tonic-gate  *
2628*0Sstevel@tonic-gate  *    None; cyclic_remove() always succeeds.
2629*0Sstevel@tonic-gate  *
2630*0Sstevel@tonic-gate  *  Caller's context
2631*0Sstevel@tonic-gate  *
2632*0Sstevel@tonic-gate  *    cpu_lock must be held by the caller, and the caller must not be in
2633*0Sstevel@tonic-gate  *    interrupt context.  The caller may not hold any locks which are also
2634*0Sstevel@tonic-gate  *    grabbed by any cyclic handler.  See "Arguments and notes", above.
2635*0Sstevel@tonic-gate  */
2636*0Sstevel@tonic-gate void
2637*0Sstevel@tonic-gate cyclic_remove(cyclic_id_t id)
2638*0Sstevel@tonic-gate {
2639*0Sstevel@tonic-gate 	cyc_id_t *idp = (cyc_id_t *)id;
2640*0Sstevel@tonic-gate 	cyc_id_t *prev = idp->cyi_prev, *next = idp->cyi_next;
2641*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = idp->cyi_cpu;
2642*0Sstevel@tonic-gate 
2643*0Sstevel@tonic-gate 	CYC_PTRACE("remove", idp, idp->cyi_cpu);
2644*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2645*0Sstevel@tonic-gate 
2646*0Sstevel@tonic-gate 	if (cpu != NULL) {
2647*0Sstevel@tonic-gate 		(void) cyclic_remove_here(cpu, idp->cyi_ndx, NULL, CY_WAIT);
2648*0Sstevel@tonic-gate 	} else {
2649*0Sstevel@tonic-gate 		ASSERT(idp->cyi_omni_list != NULL);
2650*0Sstevel@tonic-gate 		while (idp->cyi_omni_list != NULL)
2651*0Sstevel@tonic-gate 			cyclic_omni_stop(idp, idp->cyi_omni_list->cyo_cpu);
2652*0Sstevel@tonic-gate 	}
2653*0Sstevel@tonic-gate 
2654*0Sstevel@tonic-gate 	if (prev != NULL) {
2655*0Sstevel@tonic-gate 		ASSERT(cyclic_id_head != idp);
2656*0Sstevel@tonic-gate 		prev->cyi_next = next;
2657*0Sstevel@tonic-gate 	} else {
2658*0Sstevel@tonic-gate 		ASSERT(cyclic_id_head == idp);
2659*0Sstevel@tonic-gate 		cyclic_id_head = next;
2660*0Sstevel@tonic-gate 	}
2661*0Sstevel@tonic-gate 
2662*0Sstevel@tonic-gate 	if (next != NULL)
2663*0Sstevel@tonic-gate 		next->cyi_prev = prev;
2664*0Sstevel@tonic-gate 
2665*0Sstevel@tonic-gate 	kmem_cache_free(cyclic_id_cache, idp);
2666*0Sstevel@tonic-gate }
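
/*
 * To illustrate the locking constraint above with hypothetical names:  if
 * my_handler() acquires my_lock, then my_lock must not be held across the
 * call to cyclic_remove(), because cyclic_remove() may block until
 * my_handler() has completed:
 *
 *	mutex_enter(&my_lock);
 *	cyclic_remove(my_id);
 *	mutex_exit(&my_lock);
 *
 * The above can deadlock.  The correct ordering removes the cyclic first:
 *
 *	cyclic_remove(my_id);
 *	mutex_enter(&my_lock);
 *	...
 *	mutex_exit(&my_lock);
 *
 * In both fragments, cpu_lock is assumed to be held, per the caller's
 * context above.
 */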
2667*0Sstevel@tonic-gate 
2668*0Sstevel@tonic-gate /*
2669*0Sstevel@tonic-gate  *  void cyclic_bind(cyclic_id_t, cpu_t *, cpupart_t *)
2670*0Sstevel@tonic-gate  *
2671*0Sstevel@tonic-gate  *  Overview
2672*0Sstevel@tonic-gate  *
2673*0Sstevel@tonic-gate  *    cyclic_bind() atomically changes the CPU and CPU partition bindings
2674*0Sstevel@tonic-gate  *    of a cyclic.
2675*0Sstevel@tonic-gate  *
2676*0Sstevel@tonic-gate  *  Arguments and notes
2677*0Sstevel@tonic-gate  *
2678*0Sstevel@tonic-gate  *    The first argument is a cyclic_id returned from cyclic_add().
2679*0Sstevel@tonic-gate  *    cyclic_bind() may _not_ be called on a cyclic_id returned from
2680*0Sstevel@tonic-gate  *    cyclic_add_omni().
2681*0Sstevel@tonic-gate  *
2682*0Sstevel@tonic-gate  *    The second argument specifies the CPU to which to bind the specified
2683*0Sstevel@tonic-gate  *    cyclic.  If the specified cyclic is bound to a CPU other than the one
2684*0Sstevel@tonic-gate  *    specified, it will be unbound from its bound CPU.  Unbinding the cyclic
2685*0Sstevel@tonic-gate  *    from its CPU may cause it to be juggled to another CPU.  If the specified
2686*0Sstevel@tonic-gate  *    CPU is non-NULL, the cyclic will be subsequently rebound to the specified
2687*0Sstevel@tonic-gate  *    CPU.
2688*0Sstevel@tonic-gate  *
2689*0Sstevel@tonic-gate  *    If a CPU with bound cyclics is transitioned into the P_NOINTR state,
2690*0Sstevel@tonic-gate  *    only cyclics not bound to the CPU can be juggled away; CPU-bound cyclics
2691*0Sstevel@tonic-gate  *    will continue to fire on the P_NOINTR CPU.  A CPU with bound cyclics
2692*0Sstevel@tonic-gate  *    cannot be offlined (attempts to offline the CPU will return EBUSY).
2693*0Sstevel@tonic-gate  *    Likewise, cyclics may not be bound to an offline CPU; if the caller
2694*0Sstevel@tonic-gate  *    attempts to bind a cyclic to an offline CPU, the cyclic subsystem will
2695*0Sstevel@tonic-gate  *    panic.
2696*0Sstevel@tonic-gate  *
2697*0Sstevel@tonic-gate  *    The third argument specifies the CPU partition to which to bind the
2698*0Sstevel@tonic-gate  *    specified cyclic.  If the specified cyclic is bound to a CPU partition
2699*0Sstevel@tonic-gate  *    other than the one specified, it will be unbound from its bound
2700*0Sstevel@tonic-gate  *    partition.  Unbinding the cyclic from its CPU partition may cause it
2701*0Sstevel@tonic-gate  *    to be juggled to another CPU.  If the specified CPU partition is
2702*0Sstevel@tonic-gate  *    non-NULL, the cyclic will be subsequently rebound to the specified CPU
2703*0Sstevel@tonic-gate  *    partition.
2704*0Sstevel@tonic-gate  *
2705*0Sstevel@tonic-gate  *    It is the caller's responsibility to assure that the specified CPU
2706*0Sstevel@tonic-gate  *    partition contains a CPU.  If it does not, the cyclic subsystem will
2707*0Sstevel@tonic-gate  *    panic.  A CPU partition with bound cyclics cannot be destroyed (attempts
2708*0Sstevel@tonic-gate  *    to destroy the partition will return EBUSY).  If a CPU with
2709*0Sstevel@tonic-gate  *    partition-bound cyclics is transitioned into the P_NOINTR state, cyclics
2710*0Sstevel@tonic-gate  *    bound to the CPU's partition (but not bound to the CPU) will be juggled
2711*0Sstevel@tonic-gate  *    away only if there exists another CPU in the partition in the P_ONLINE
2712*0Sstevel@tonic-gate  *    state.
2713*0Sstevel@tonic-gate  *
2714*0Sstevel@tonic-gate  *    It is the caller's responsibility to assure that the specified CPU and
2715*0Sstevel@tonic-gate  *    CPU partition are self-consistent.  If both parameters are non-NULL,
2716*0Sstevel@tonic-gate  *    and the specified CPU partition does not contain the specified CPU, the
2717*0Sstevel@tonic-gate  *    cyclic subsystem will panic.
2718*0Sstevel@tonic-gate  *
2719*0Sstevel@tonic-gate  *    It is the caller's responsibility to assure that the specified CPU has
2720*0Sstevel@tonic-gate  *    been configured with respect to the cyclic subsystem.  Generally, this
2721*0Sstevel@tonic-gate  *    is always true for valid, on-line CPUs.  The only periods of time during
2722*0Sstevel@tonic-gate  *    which this may not be true are during MP boot (i.e. after cyclic_init()
2723*0Sstevel@tonic-gate  *    is called but before cyclic_mp_init() is called) or during dynamic
2724*0Sstevel@tonic-gate  *    reconfiguration; cyclic_bind() should only be called with great care
2725*0Sstevel@tonic-gate  *    from these contexts.
2726*0Sstevel@tonic-gate  *
2727*0Sstevel@tonic-gate  *  Return value
2728*0Sstevel@tonic-gate  *
2729*0Sstevel@tonic-gate  *    None; cyclic_bind() always succeeds.
2730*0Sstevel@tonic-gate  *
2731*0Sstevel@tonic-gate  *  Caller's context
2732*0Sstevel@tonic-gate  *
2733*0Sstevel@tonic-gate  *    cpu_lock must be held by the caller, and the caller must not be in
2734*0Sstevel@tonic-gate  *    interrupt context.  The caller may not hold any locks which are also
2735*0Sstevel@tonic-gate  *    grabbed by any cyclic handler.
2736*0Sstevel@tonic-gate  */
2737*0Sstevel@tonic-gate void
2738*0Sstevel@tonic-gate cyclic_bind(cyclic_id_t id, cpu_t *d, cpupart_t *part)
2739*0Sstevel@tonic-gate {
2740*0Sstevel@tonic-gate 	cyc_id_t *idp = (cyc_id_t *)id;
2741*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = idp->cyi_cpu;
2742*0Sstevel@tonic-gate 	cpu_t *c;
2743*0Sstevel@tonic-gate 	uint16_t flags;
2744*0Sstevel@tonic-gate 
2745*0Sstevel@tonic-gate 	CYC_PTRACE("bind", d, part);
2746*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2747*0Sstevel@tonic-gate 	ASSERT(part == NULL || d == NULL || d->cpu_part == part);
2748*0Sstevel@tonic-gate 
2749*0Sstevel@tonic-gate 	if (cpu == NULL) {
2750*0Sstevel@tonic-gate 		ASSERT(idp->cyi_omni_list != NULL);
2751*0Sstevel@tonic-gate 		panic("attempt to change binding of omnipresent cyclic");
2752*0Sstevel@tonic-gate 	}
2753*0Sstevel@tonic-gate 
2754*0Sstevel@tonic-gate 	c = cpu->cyp_cpu;
2755*0Sstevel@tonic-gate 	flags = cpu->cyp_cyclics[idp->cyi_ndx].cy_flags;
2756*0Sstevel@tonic-gate 
2757*0Sstevel@tonic-gate 	if (c != d && (flags & CYF_CPU_BOUND))
2758*0Sstevel@tonic-gate 		cyclic_unbind_cpu(id);
2759*0Sstevel@tonic-gate 
2760*0Sstevel@tonic-gate 	/*
2761*0Sstevel@tonic-gate 	 * Reload our cpu (we may have migrated).  We don't have to reload
2762*0Sstevel@tonic-gate 	 * the flags field here; if we were CYF_PART_BOUND on entry, we are
2763*0Sstevel@tonic-gate 	 * CYF_PART_BOUND now.
2764*0Sstevel@tonic-gate 	 */
2765*0Sstevel@tonic-gate 	cpu = idp->cyi_cpu;
2766*0Sstevel@tonic-gate 	c = cpu->cyp_cpu;
2767*0Sstevel@tonic-gate 
2768*0Sstevel@tonic-gate 	if (part != c->cpu_part && (flags & CYF_PART_BOUND))
2769*0Sstevel@tonic-gate 		cyclic_unbind_cpupart(id);
2770*0Sstevel@tonic-gate 
2771*0Sstevel@tonic-gate 	/*
2772*0Sstevel@tonic-gate 	 * Now reload the flags field, asserting that if we are CPU bound,
2773*0Sstevel@tonic-gate 	 * the CPU was specified (and likewise, if we are partition bound,
2774*0Sstevel@tonic-gate 	 * the partition was specified).
2775*0Sstevel@tonic-gate 	 */
2776*0Sstevel@tonic-gate 	cpu = idp->cyi_cpu;
2777*0Sstevel@tonic-gate 	c = cpu->cyp_cpu;
2778*0Sstevel@tonic-gate 	flags = cpu->cyp_cyclics[idp->cyi_ndx].cy_flags;
2779*0Sstevel@tonic-gate 	ASSERT(!(flags & CYF_CPU_BOUND) || c == d);
2780*0Sstevel@tonic-gate 	ASSERT(!(flags & CYF_PART_BOUND) || c->cpu_part == part);
2781*0Sstevel@tonic-gate 
2782*0Sstevel@tonic-gate 	if (!(flags & CYF_CPU_BOUND) && d != NULL)
2783*0Sstevel@tonic-gate 		cyclic_bind_cpu(id, d);
2784*0Sstevel@tonic-gate 
2785*0Sstevel@tonic-gate 	if (!(flags & CYF_PART_BOUND) && part != NULL)
2786*0Sstevel@tonic-gate 		cyclic_bind_cpupart(id, part);
2787*0Sstevel@tonic-gate }
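
/*
 * For illustration, a sketch of rebinding with hypothetical variables; per
 * the rules above, the caller must know that target_cpu is on-line and
 * configured with respect to the cyclic subsystem:
 *
 *	mutex_enter(&cpu_lock);
 *	cyclic_bind(my_id, target_cpu, NULL);
 *	...
 *	cyclic_bind(my_id, NULL, NULL);
 *	mutex_exit(&cpu_lock);
 *
 * The first call binds the cyclic to target_cpu (unbinding it first if it
 * was bound elsewhere); the second dissolves both the CPU and partition
 * bindings, which may cause the cyclic to be juggled to another CPU.
 */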
2788*0Sstevel@tonic-gate 
2789*0Sstevel@tonic-gate hrtime_t
2790*0Sstevel@tonic-gate cyclic_getres()
2791*0Sstevel@tonic-gate {
2792*0Sstevel@tonic-gate 	return (cyclic_resolution);
2793*0Sstevel@tonic-gate }
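
/*
 * cyclic_getres() returns the resolution with which the backend was
 * initialized.  As a sketch (desired_interval is hypothetical), a consumer
 * might round a requested interval up to the next multiple of the
 * resolution rather than ask for something finer than the backend honors:
 *
 *	hrtime_t res = cyclic_getres();
 *	hrtime_t interval = desired_interval;
 *
 *	if (interval < res)
 *		interval = res;
 *	else
 *		interval = ((interval + res - 1) / res) * res;
 */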
2794*0Sstevel@tonic-gate 
2795*0Sstevel@tonic-gate void
2796*0Sstevel@tonic-gate cyclic_init(cyc_backend_t *be, hrtime_t resolution)
2797*0Sstevel@tonic-gate {
2798*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2799*0Sstevel@tonic-gate 
2800*0Sstevel@tonic-gate 	CYC_PTRACE("init", be, resolution);
2801*0Sstevel@tonic-gate 	cyclic_resolution = resolution;
2802*0Sstevel@tonic-gate 
2803*0Sstevel@tonic-gate 	/*
2804*0Sstevel@tonic-gate 	 * Copy the passed cyc_backend into the backend template.  This must
2805*0Sstevel@tonic-gate 	 * be done before the CPU can be configured.
2806*0Sstevel@tonic-gate 	 */
2807*0Sstevel@tonic-gate 	bcopy(be, &cyclic_backend, sizeof (cyc_backend_t));
2808*0Sstevel@tonic-gate 
2809*0Sstevel@tonic-gate 	/*
2810*0Sstevel@tonic-gate 	 * It's safe to look at the "CPU" pointer without disabling kernel
2811*0Sstevel@tonic-gate 	 * preemption; cyclic_init() is called only during startup by the
2812*0Sstevel@tonic-gate 	 * cyclic backend.
2813*0Sstevel@tonic-gate 	 */
2814*0Sstevel@tonic-gate 	cyclic_configure(CPU);
2815*0Sstevel@tonic-gate 	cyclic_online(CPU);
2816*0Sstevel@tonic-gate }
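
/*
 * To sketch cyclic_init() from the backend's perspective:  the platform
 * backend fills in a cyc_backend_t with its operations and calls
 * cyclic_init() on the boot CPU with cpu_lock held.  Only the fields
 * referenced above are shown, and the my_* functions and my_resolution
 * are hypothetical:
 *
 *	cyc_backend_t be;
 *
 *	be.cyb_configure = my_configure;
 *	be.cyb_unconfigure = my_unconfigure;
 *	be.cyb_enable = my_enable;
 *	be.cyb_disable = my_disable;
 *	be.cyb_reprogram = my_reprogram;
 *	be.cyb_suspend = my_suspend;
 *	be.cyb_resume = my_resume;
 *	be.cyb_set_level = my_set_level;
 *	be.cyb_restore_level = my_restore_level;
 *	be.cyb_arg = NULL;
 *
 *	mutex_enter(&cpu_lock);
 *	cyclic_init(&be, my_resolution);
 *	mutex_exit(&cpu_lock);
 */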
2817*0Sstevel@tonic-gate 
2818*0Sstevel@tonic-gate /*
2819*0Sstevel@tonic-gate  * It is assumed that cyclic_mp_init() is called some time after cyclic_init()
2820*0Sstevel@tonic-gate  * (and therefore, after cpu0 has been initialized).  We grab cpu_lock,
2821*0Sstevel@tonic-gate  * find the already initialized CPU, and initialize every other CPU with the
2822*0Sstevel@tonic-gate  * same backend.  Finally, we register a cpu_setup function.
2823*0Sstevel@tonic-gate  */
2824*0Sstevel@tonic-gate void
2825*0Sstevel@tonic-gate cyclic_mp_init()
2826*0Sstevel@tonic-gate {
2827*0Sstevel@tonic-gate 	cpu_t *c;
2828*0Sstevel@tonic-gate 
2829*0Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
2830*0Sstevel@tonic-gate 
2831*0Sstevel@tonic-gate 	c = cpu_list;
2832*0Sstevel@tonic-gate 	do {
2833*0Sstevel@tonic-gate 		if (c->cpu_cyclic == NULL) {
2834*0Sstevel@tonic-gate 			cyclic_configure(c);
2835*0Sstevel@tonic-gate 			cyclic_online(c);
2836*0Sstevel@tonic-gate 		}
2837*0Sstevel@tonic-gate 	} while ((c = c->cpu_next) != cpu_list);
2838*0Sstevel@tonic-gate 
2839*0Sstevel@tonic-gate 	register_cpu_setup_func((cpu_setup_func_t *)cyclic_cpu_setup, NULL);
2840*0Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
2841*0Sstevel@tonic-gate }
2842*0Sstevel@tonic-gate 
2843*0Sstevel@tonic-gate /*
2844*0Sstevel@tonic-gate  *  int cyclic_juggle(cpu_t *)
2845*0Sstevel@tonic-gate  *
2846*0Sstevel@tonic-gate  *  Overview
2847*0Sstevel@tonic-gate  *
2848*0Sstevel@tonic-gate  *    cyclic_juggle() juggles as many cyclics as possible away from the
2849*0Sstevel@tonic-gate  *    specified CPU; all remaining cyclics on the CPU will either be CPU-
2850*0Sstevel@tonic-gate  *    or partition-bound.
2851*0Sstevel@tonic-gate  *
2852*0Sstevel@tonic-gate  *  Arguments and notes
2853*0Sstevel@tonic-gate  *
2854*0Sstevel@tonic-gate  *    The only argument to cyclic_juggle() is the CPU from which cyclics
2855*0Sstevel@tonic-gate  *    should be juggled.  CPU-bound cyclics are never juggled; partition-bound
2856*0Sstevel@tonic-gate  *    cyclics are only juggled if the specified CPU is in the P_NOINTR state
2857*0Sstevel@tonic-gate  *    and there exists a P_ONLINE CPU in the partition.  The cyclic subsystem
2858*0Sstevel@tonic-gate  *    assures that a cyclic will never fire late or spuriously, even while
2859*0Sstevel@tonic-gate  *    being juggled.
2860*0Sstevel@tonic-gate  *
2861*0Sstevel@tonic-gate  *  Return value
2862*0Sstevel@tonic-gate  *
2863*0Sstevel@tonic-gate  *    cyclic_juggle() returns a non-zero value if all cyclics were able to
2864*0Sstevel@tonic-gate  *    be juggled away from the CPU, and zero if one or more cyclics could
2865*0Sstevel@tonic-gate  *    not be juggled away.
2866*0Sstevel@tonic-gate  *
2867*0Sstevel@tonic-gate  *  Caller's context
2868*0Sstevel@tonic-gate  *
2869*0Sstevel@tonic-gate  *    cpu_lock must be held by the caller, and the caller must not be in
2870*0Sstevel@tonic-gate  *    interrupt context.  The caller may not hold any locks which are also
2871*0Sstevel@tonic-gate  *    grabbed by any cyclic handler.  While cyclic_juggle() _may_ be called
2872*0Sstevel@tonic-gate  *    in any context satisfying these constraints, it _must_ be called
2873*0Sstevel@tonic-gate  *    immediately after clearing CPU_ENABLE (i.e. before dropping cpu_lock).
2874*0Sstevel@tonic-gate  *    Failure to do so could result in an assertion failure in the cyclic
2875*0Sstevel@tonic-gate  *    subsystem.
2876*0Sstevel@tonic-gate  */
2877*0Sstevel@tonic-gate int
2878*0Sstevel@tonic-gate cyclic_juggle(cpu_t *c)
2879*0Sstevel@tonic-gate {
2880*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = c->cpu_cyclic;
2881*0Sstevel@tonic-gate 	cyc_id_t *idp;
2882*0Sstevel@tonic-gate 	int all_juggled = 1;
2883*0Sstevel@tonic-gate 
2884*0Sstevel@tonic-gate 	CYC_PTRACE1("juggle", c);
2885*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2886*0Sstevel@tonic-gate 
2887*0Sstevel@tonic-gate 	/*
2888*0Sstevel@tonic-gate 	 * We'll go through each cyclic on the CPU, attempting to juggle
2889*0Sstevel@tonic-gate 	 * each one elsewhere.
2890*0Sstevel@tonic-gate 	 */
2891*0Sstevel@tonic-gate 	for (idp = cyclic_id_head; idp != NULL; idp = idp->cyi_next) {
2892*0Sstevel@tonic-gate 		if (idp->cyi_cpu != cpu)
2893*0Sstevel@tonic-gate 			continue;
2894*0Sstevel@tonic-gate 
2895*0Sstevel@tonic-gate 		if (cyclic_juggle_one(idp) == 0) {
2896*0Sstevel@tonic-gate 			all_juggled = 0;
2897*0Sstevel@tonic-gate 			continue;
2898*0Sstevel@tonic-gate 		}
2899*0Sstevel@tonic-gate 
2900*0Sstevel@tonic-gate 		ASSERT(idp->cyi_cpu != cpu);
2901*0Sstevel@tonic-gate 	}
2902*0Sstevel@tonic-gate 
2903*0Sstevel@tonic-gate 	return (all_juggled);
2904*0Sstevel@tonic-gate }
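
/*
 * A sketch of the ordering constraint above, from the point of view of a
 * hypothetical caller transitioning a CPU out of the interrupt-enabled
 * state:
 *
 *	ASSERT(MUTEX_HELD(&cpu_lock));
 *
 *	c->cpu_flags &= ~CPU_ENABLE;
 *	(void) cyclic_juggle(c);
 *
 * The call to cyclic_juggle() must come immediately after CPU_ENABLE is
 * cleared, before cpu_lock is dropped; per the note above, failure to do
 * so could trip assertions in the cyclic subsystem.
 */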
2905*0Sstevel@tonic-gate 
2906*0Sstevel@tonic-gate /*
2907*0Sstevel@tonic-gate  *  int cyclic_offline(cpu_t *)
2908*0Sstevel@tonic-gate  *
2909*0Sstevel@tonic-gate  *  Overview
2910*0Sstevel@tonic-gate  *
2911*0Sstevel@tonic-gate  *    cyclic_offline() offlines the cyclic subsystem on the specified CPU.
2912*0Sstevel@tonic-gate  *
2913*0Sstevel@tonic-gate  *  Arguments and notes
2914*0Sstevel@tonic-gate  *
2915*0Sstevel@tonic-gate  *    The only argument to cyclic_offline() is a CPU to offline.
2916*0Sstevel@tonic-gate  *    cyclic_offline() will attempt to juggle cyclics away from the specified
2917*0Sstevel@tonic-gate  *    CPU.
2918*0Sstevel@tonic-gate  *
2919*0Sstevel@tonic-gate  *  Return value
2920*0Sstevel@tonic-gate  *
2921*0Sstevel@tonic-gate  *    cyclic_offline() returns 1 if all cyclics on the CPU were juggled away
2922*0Sstevel@tonic-gate  *    and the cyclic subsystem on the CPU was successfully offlined.
2923*0Sstevel@tonic-gate  *    cyclic_offline() returns 0 if some cyclics remain, blocking the cyclic
2924*0Sstevel@tonic-gate  *    offline operation.  All remaining cyclics on the CPU will either be
2925*0Sstevel@tonic-gate  *    CPU- or partition-bound.
2926*0Sstevel@tonic-gate  *
2927*0Sstevel@tonic-gate  *    See the "Arguments and notes" of cyclic_juggle(), above, for more detail
2928*0Sstevel@tonic-gate  *    on cyclic juggling.
2929*0Sstevel@tonic-gate  *
2930*0Sstevel@tonic-gate  *  Caller's context
2931*0Sstevel@tonic-gate  *
2932*0Sstevel@tonic-gate  *    The only caller of cyclic_offline() should be the processor management
2933*0Sstevel@tonic-gate  *    subsystem.  It is expected that the caller of cyclic_offline() will
2934*0Sstevel@tonic-gate  *    offline the CPU immediately after cyclic_offline() returns success (i.e.
2935*0Sstevel@tonic-gate  *    before dropping cpu_lock).  Moreover, it is expected that the caller will
2936*0Sstevel@tonic-gate  *    fail the CPU offline operation if cyclic_offline() returns failure.
2937*0Sstevel@tonic-gate  */
2938*0Sstevel@tonic-gate int
2939*0Sstevel@tonic-gate cyclic_offline(cpu_t *c)
2940*0Sstevel@tonic-gate {
2941*0Sstevel@tonic-gate 	cyc_cpu_t *cpu = c->cpu_cyclic;
2942*0Sstevel@tonic-gate 	cyc_id_t *idp;
2943*0Sstevel@tonic-gate 
2944*0Sstevel@tonic-gate 	CYC_PTRACE1("offline", cpu);
2945*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
2946*0Sstevel@tonic-gate 
2947*0Sstevel@tonic-gate 	if (!cyclic_juggle(c))
2948*0Sstevel@tonic-gate 		return (0);
2949*0Sstevel@tonic-gate 
2950*0Sstevel@tonic-gate 	/*
2951*0Sstevel@tonic-gate 	 * This CPU is headed offline; we now need to stop omnipresent
2952*0Sstevel@tonic-gate 	 * cyclics from firing on this CPU.
2953*0Sstevel@tonic-gate 	 */
2954*0Sstevel@tonic-gate 	for (idp = cyclic_id_head; idp != NULL; idp = idp->cyi_next) {
2955*0Sstevel@tonic-gate 		if (idp->cyi_cpu != NULL)
2956*0Sstevel@tonic-gate 			continue;
2957*0Sstevel@tonic-gate 
2958*0Sstevel@tonic-gate 		/*
2959*0Sstevel@tonic-gate 		 * We cannot possibly be offlining the last CPU; cyi_omni_list
2960*0Sstevel@tonic-gate 		 * must be non-NULL.
2961*0Sstevel@tonic-gate 		 */
2962*0Sstevel@tonic-gate 		ASSERT(idp->cyi_omni_list != NULL);
2963*0Sstevel@tonic-gate 		cyclic_omni_stop(idp, cpu);
2964*0Sstevel@tonic-gate 	}
2965*0Sstevel@tonic-gate 
2966*0Sstevel@tonic-gate 	ASSERT(cpu->cyp_state == CYS_ONLINE);
2967*0Sstevel@tonic-gate 	cpu->cyp_state = CYS_OFFLINE;
2968*0Sstevel@tonic-gate 
2969*0Sstevel@tonic-gate 	return (1);
2970*0Sstevel@tonic-gate }
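
/*
 * A sketch of the expected caller pattern, per the context notes above
 * (the surrounding offline logic is hypothetical):
 *
 *	ASSERT(MUTEX_HELD(&cpu_lock));
 *
 *	if (!cyclic_offline(c))
 *		return (EBUSY);
 *
 *	...
 *
 * On success the caller proceeds to offline the CPU before dropping
 * cpu_lock; on failure it fails the CPU offline operation, as one or more
 * CPU- or partition-bound cyclics remain.
 */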

/*
 *  void cyclic_online(cpu_t *)
 *
 *  Overview
 *
 *    cyclic_online() onlines a CPU previously offlined with cyclic_offline().
 *
 *  Arguments and notes
 *
 *    cyclic_online()'s only argument is a CPU to online.  The specified
 *    CPU must have been previously offlined with cyclic_offline().  After
 *    cyclic_online() returns, the specified CPU will be eligible to execute
 *    cyclics.
 *
 *  Return value
 *
 *    None; cyclic_online() always succeeds.
 *
 *  Caller's context
 *
 *    cyclic_online() should only be called by the processor management
 *    subsystem; cpu_lock must be held.
 */
void
cyclic_online(cpu_t *c)
{
	cyc_cpu_t *cpu = c->cpu_cyclic;
	cyc_id_t *idp;

	CYC_PTRACE1("online", cpu);
	ASSERT(c->cpu_flags & CPU_ENABLE);
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu->cyp_state == CYS_OFFLINE);

	cpu->cyp_state = CYS_ONLINE;

	/*
	 * Now that this CPU is open for business, we need to start firing
	 * all omnipresent cyclics on it.
	 */
	for (idp = cyclic_id_head; idp != NULL; idp = idp->cyi_next) {
		if (idp->cyi_cpu != NULL)
			continue;

		cyclic_omni_start(idp, cpu);
	}
}
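
/*
 * An illustrative sketch, not part of the subsystem: the online path is
 * the mirror image of the offline sketch above.  example_cpu_online() is
 * a hypothetical name; the platform-specific onlining (which must leave
 * CPU_ENABLE set on the CPU, per the ASSERT in cyclic_online()) is
 * elided.
 */
static void
example_cpu_online(cpu_t *c)
{
	mutex_enter(&cpu_lock);

	/*
	 * Bring the CPU online here (elided); once it can take
	 * interrupts, restart the cyclic subsystem on it.
	 */
	cyclic_online(c);

	mutex_exit(&cpu_lock);
}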

/*
 *  void cyclic_move_in(cpu_t *)
 *
 *  Overview
 *
 *    cyclic_move_in() is called by the CPU partition code immediately after
 *    the specified CPU has moved into a new partition.
 *
 *  Arguments and notes
 *
 *    The only argument to cyclic_move_in() is a CPU which has moved into a
 *    new partition.  If the specified CPU is P_ONLINE, and every other
 *    CPU in the specified CPU's new partition is P_NOINTR, cyclic_move_in()
 *    will juggle all partition-bound, CPU-unbound cyclics to the specified
 *    CPU.
 *
 *  Return value
 *
 *    None; cyclic_move_in() always succeeds.
 *
 *  Caller's context
 *
 *    cyclic_move_in() should _only_ be called immediately after a CPU has
 *    moved into a new partition, with cpu_lock held.  As with other calls
 *    into the cyclic subsystem, no lock may be held which is also grabbed
 *    by any cyclic handler.
 */
void
cyclic_move_in(cpu_t *d)
{
	cyc_id_t *idp;
	cyc_cpu_t *dest = d->cpu_cyclic;
	cyclic_t *cyclic;
	cpupart_t *part = d->cpu_part;

	CYC_PTRACE("move-in", dest, part);
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Look for CYF_PART_BOUND cyclics in the new partition.  If
	 * we find one, check to see if it is currently on a CPU which has
	 * interrupts disabled.  If it is (and if this CPU currently has
	 * interrupts enabled), we'll juggle those cyclics over here.
	 */
	if (!(d->cpu_flags & CPU_ENABLE)) {
		CYC_PTRACE1("move-in-none", dest);
		return;
	}

	for (idp = cyclic_id_head; idp != NULL; idp = idp->cyi_next) {
		cyc_cpu_t *cpu = idp->cyi_cpu;
		cpu_t *c;

		/*
		 * Omnipresent cyclics are exempt from juggling.
		 */
		if (cpu == NULL)
			continue;

		c = cpu->cyp_cpu;

		if (c->cpu_part != part || (c->cpu_flags & CPU_ENABLE))
			continue;

		cyclic = &cpu->cyp_cyclics[idp->cyi_ndx];

		if (cyclic->cy_flags & CYF_CPU_BOUND)
			continue;

		/*
		 * We know that this cyclic is bound to its processor set
		 * (otherwise, it would not be on a CPU with interrupts
		 * disabled); juggle it to our CPU.
		 */
		ASSERT(cyclic->cy_flags & CYF_PART_BOUND);
		cyclic_juggle_one_to(idp, dest);
	}

	CYC_PTRACE1("move-in-done", dest);
}
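
/*
 * An illustrative sketch, not part of the subsystem: the ordering the
 * partition code is expected to follow -- move the CPU into its new
 * partition first, then call cyclic_move_in(), all without dropping
 * cpu_lock.  example_cpupart_move_in() is a hypothetical name, and the
 * direct cpu_part assignment stands in for the actual repartitioning
 * work, which is elided.
 */
static void
example_cpupart_move_in(cpu_t *c, cpupart_t *newpart)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	c->cpu_part = newpart;	/* actual repartitioning elided */
	cyclic_move_in(c);	/* immediately after the move */
}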

/*
 *  int cyclic_move_out(cpu_t *)
 *
 *  Overview
 *
 *    cyclic_move_out() is called by the CPU partition code immediately before
 *    the specified CPU is to move out of its partition.
 *
 *  Arguments and notes
 *
 *    The only argument to cyclic_move_out() is a CPU which is to move out of
 *    its partition.
 *
 *    cyclic_move_out() will attempt to juggle away all partition-bound
 *    cyclics.  If the specified CPU is the last CPU in a partition with
 *    partition-bound cyclics, cyclic_move_out() will fail.  If there exists
 *    a partition-bound cyclic which is CPU-bound to the specified CPU,
 *    cyclic_move_out() will fail.
 *
 *    Note that cyclic_move_out() will _only_ attempt to juggle away
 *    partition-bound cyclics; neither CPU-bound cyclics which are not
 *    partition-bound nor unbound cyclics are affected by changing the
 *    partition affiliation of the CPU.
 *
 *  Return value
 *
 *    cyclic_move_out() returns 1 if all partition-bound cyclics on the CPU
 *    were juggled away; 0 if some cyclics remain.
 *
 *  Caller's context
 *
 *    cyclic_move_out() should _only_ be called immediately before a CPU
 *    moves out of its partition, with cpu_lock held.  It is expected that
 *    the caller of cyclic_move_out() will change the processor set affiliation
 *    of the specified CPU immediately after cyclic_move_out() returns
 *    success (i.e. before dropping cpu_lock).  Moreover, it is expected that
 *    the caller will fail the CPU repartitioning operation if cyclic_move_out()
 *    returns failure.  As with other calls into the cyclic subsystem, no lock
 *    may be held which is also grabbed by any cyclic handler.
 */
int
cyclic_move_out(cpu_t *c)
{
	cyc_id_t *idp;
	cyc_cpu_t *cpu = c->cpu_cyclic, *dest;
	cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics;
	cpupart_t *part = c->cpu_part;

	CYC_PTRACE1("move-out", cpu);
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * If there are any CYF_PART_BOUND cyclics on this CPU, we need
	 * to try to juggle them away.
	 */
	for (idp = cyclic_id_head; idp != NULL; idp = idp->cyi_next) {
		if (idp->cyi_cpu != cpu)
			continue;

		cyclic = &cyclics[idp->cyi_ndx];

		if (!(cyclic->cy_flags & CYF_PART_BOUND))
			continue;

		dest = cyclic_pick_cpu(part, c, c, cyclic->cy_flags);

		if (dest == NULL) {
			/*
			 * We can't juggle this cyclic; we need to return
			 * failure (we won't bother trying to juggle away
			 * other cyclics).
			 */
			CYC_PTRACE("move-out-fail", cpu, idp);
			return (0);
		}
		cyclic_juggle_one_to(idp, dest);
	}

	CYC_PTRACE1("move-out-done", cpu);
	return (1);
}
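
/*
 * An illustrative sketch, not part of the subsystem: cyclic_move_out()
 * gates the repartitioning just as cyclic_offline() gates a CPU offline.
 * example_cpupart_move_out() is a hypothetical name, and the direct
 * cpu_part assignment again stands in for the elided repartitioning
 * work.
 */
static int
example_cpupart_move_out(cpu_t *c, cpupart_t *newpart)
{
	int err = 0;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cyclic_move_out(c)) {
		/*
		 * All partition-bound cyclics were juggled away; change
		 * the partition affiliation before dropping cpu_lock.
		 */
		c->cpu_part = newpart;
	} else {
		/*
		 * A partition-bound cyclic could not be juggled away;
		 * the repartitioning operation must fail.
		 */
		err = EBUSY;
	}

	return (err);
}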

/*
 *  void cyclic_suspend()
 *
 *  Overview
 *
 *    cyclic_suspend() suspends all cyclic activity throughout the cyclic
 *    subsystem.  It should be called only by subsystems which are attempting
 *    to suspend the entire system (e.g. checkpoint/resume, dynamic
 *    reconfiguration).
 *
 *  Arguments and notes
 *
 *    cyclic_suspend() takes no arguments.  Each CPU with an active cyclic
 *    disables its backend (offline CPUs disable their backends as part of
 *    the cyclic_offline() operation), thereby disabling future CY_HIGH_LEVEL
 *    interrupts.
 *
 *    Note that disabling CY_HIGH_LEVEL interrupts does not completely preclude
 *    cyclic handlers from being called after cyclic_suspend() returns:  if a
 *    CY_LOCK_LEVEL or CY_LOW_LEVEL interrupt thread was blocked at the time
 *    of cyclic_suspend(), cyclic handlers at its level may continue to be
 *    called after the interrupt thread becomes unblocked.  The
 *    post-cyclic_suspend() activity is bounded by the pend count on all
 *    cyclics at the time of cyclic_suspend().  Callers concerned with more
 *    than simply disabling future CY_HIGH_LEVEL interrupts must check for
 *    this condition.
 *
 *    On most platforms, timestamps from gethrtime() and gethrestime() are not
 *    guaranteed to monotonically increase between cyclic_suspend() and
 *    cyclic_resume().  However, timestamps are guaranteed to monotonically
 *    increase across the entire cyclic_suspend()/cyclic_resume() operation.
 *    That is, every timestamp obtained before cyclic_suspend() will be less
 *    than every timestamp obtained after cyclic_resume().
 *
 *  Return value
 *
 *    None; cyclic_suspend() always succeeds.
 *
 *  Caller's context
 *
 *    The cyclic subsystem must be configured on every valid CPU;
 *    cyclic_suspend() may not be called during boot or during dynamic
 *    reconfiguration.  Additionally, cpu_lock must be held, and the caller
 *    cannot be in high-level interrupt context.  However, unlike most other
 *    cyclic entry points, cyclic_suspend() may be called with locks held
 *    which are also acquired by CY_LOCK_LEVEL or CY_LOW_LEVEL cyclic
 *    handlers.
 */
void
cyclic_suspend()
{
	cpu_t *c;
	cyc_cpu_t *cpu;
	cyc_xcallarg_t arg;
	cyc_backend_t *be;

	CYC_PTRACE0("suspend");
	ASSERT(MUTEX_HELD(&cpu_lock));
	c = cpu_list;

	do {
		cpu = c->cpu_cyclic;
		be = cpu->cyp_backend;
		arg.cyx_cpu = cpu;

		be->cyb_xcall(be->cyb_arg, c,
		    (cyc_func_t)cyclic_suspend_xcall, &arg);
	} while ((c = c->cpu_next) != cpu_list);
}

/*
 *  void cyclic_resume()
 *
 *  Overview
 *
 *    cyclic_resume() resumes all cyclic activity throughout the cyclic
 *    subsystem.  It should be called only by system-suspending subsystems.
 *
 *  Arguments and notes
 *
 *    cyclic_resume() takes no arguments.  Each CPU with an active cyclic
 *    reenables and reprograms its backend (offline CPUs are not reenabled).
 *    On most platforms, timestamps from gethrtime() and gethrestime() are not
 *    guaranteed to monotonically increase between cyclic_suspend() and
 *    cyclic_resume().  However, timestamps are guaranteed to monotonically
 *    increase across the entire cyclic_suspend()/cyclic_resume() operation.
 *    That is, every timestamp obtained before cyclic_suspend() will be less
 *    than every timestamp obtained after cyclic_resume().
 *
 *  Return value
 *
 *    None; cyclic_resume() always succeeds.
 *
 *  Caller's context
 *
 *    The cyclic subsystem must be configured on every valid CPU;
 *    cyclic_resume() may not be called during boot or during dynamic
 *    reconfiguration.  Additionally, cpu_lock must be held, and the caller
 *    cannot be in high-level interrupt context.  However, unlike most other
 *    cyclic entry points, cyclic_resume() may be called with locks held which
 *    are also acquired by CY_LOCK_LEVEL or CY_LOW_LEVEL cyclic handlers.
 */
void
cyclic_resume()
{
	cpu_t *c;
	cyc_cpu_t *cpu;
	cyc_xcallarg_t arg;
	cyc_backend_t *be;

	CYC_PTRACE0("resume");
	ASSERT(MUTEX_HELD(&cpu_lock));

	c = cpu_list;

	do {
		cpu = c->cpu_cyclic;
		be = cpu->cyp_backend;
		arg.cyx_cpu = cpu;

		be->cyb_xcall(be->cyb_arg, c,
		    (cyc_func_t)cyclic_resume_xcall, &arg);
	} while ((c = c->cpu_next) != cpu_list);
}
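
/*
 * An illustrative sketch, not part of the subsystem: a system-suspending
 * subsystem (e.g. checkpoint/resume) bracketing its work with
 * cyclic_suspend() and cyclic_resume().  example_system_suspend() is a
 * hypothetical name, and the actual suspend/resume work is elided.  Note
 * that after cyclic_suspend() returns, blocked CY_LOCK_LEVEL and
 * CY_LOW_LEVEL handlers may still run, bounded by their pend counts.
 */
static void
example_system_suspend(void)
{
	mutex_enter(&cpu_lock);
	cyclic_suspend();

	/*
	 * No future CY_HIGH_LEVEL interrupts will fire; suspend and
	 * later resume the rest of the system here (elided).  Any
	 * timestamp taken before cyclic_suspend() is guaranteed to be
	 * less than any timestamp taken after cyclic_resume().
	 */

	cyclic_resume();
	mutex_exit(&cpu_lock);
}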