/*	$NetBSD: cyclic.c,v 1.10 2024/09/08 09:36:46 rillig Exp $	*/

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Portions Copyright 2008 John Birrell <jb@freebsd.org>
 *
 * $FreeBSD: head/sys/cddl/dev/cyclic/cyclic.c 227293 2011-11-07 06:44:47Z ed $
 *
 * This is a simplified version of the cyclic timer subsystem from
 * OpenSolaris. In the FreeBSD version, we don't use interrupt levels.
 */

/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 *  The Cyclic Subsystem
 *  --------------------
 *
 *  Prehistory
 *
 *  Historically, most computer architectures have specified interval-based
 *  timer parts (e.g. SPARCstation's counter/timer; Intel's i8254).  While
 *  these parts deal in relative (i.e. not absolute) time values, they are
 *  typically used by the operating system to implement the abstraction of
 *  absolute time.  As a result, these parts cannot typically be reprogrammed
 *  without introducing error in the system's notion of time.
 *
 *  Starting in about 1994, chip architectures began specifying high resolution
 *  timestamp registers.  As of this writing (1999), all major chip families
 *  (UltraSPARC, PentiumPro, MIPS, PowerPC, Alpha) have high resolution
 *  timestamp registers, and two (UltraSPARC and MIPS) have added the capacity
 *  to interrupt based on timestamp values.  These timestamp-compare registers
 *  present a time-based interrupt source which can be reprogrammed arbitrarily
 *  often without introducing error.  Given the low cost of implementing such a
 *  timestamp-compare register (and the tangible benefit of eliminating
 *  discrete timer parts), it is reasonable to expect that future chip
 *  architectures will adopt this feature.
 *
 *  The cyclic subsystem has been designed to take advantage of chip
 *  architectures with the capacity to interrupt based on absolute, high
 *  resolution values of time.
 *
 *  Subsystem Overview
 *
 *  The cyclic subsystem is a low-level kernel subsystem designed to provide
 *  arbitrarily high resolution, per-CPU interval timers (to avoid colliding
 *  with existing terms, we dub such an interval timer a "cyclic").
 *  Alternatively, a cyclic may be specified to be "omnipresent", denoting
 *  firing on all online CPUs.
 *
 *  Cyclic Subsystem Interface Overview
 *  -----------------------------------
 *
 *  The cyclic subsystem has interfaces with the kernel at-large, with other
 *  kernel subsystems (e.g. the processor management subsystem, the checkpoint
 *  resume subsystem) and with the platform (the cyclic backend).  Each
 *  of these interfaces is given a brief synopsis here, and is described
 *  in full above the interface's implementation.
 *
 *  The following diagram displays the cyclic subsystem's interfaces to
 *  other kernel components.  The arrows denote a "calls" relationship, with
 *  the large arrow indicating the cyclic subsystem's consumer interface.
 *  Each arrow is labeled with the section in which the corresponding
 *  interface is described.
 *
 *           Kernel at-large consumers
 *           -----------++------------
 *                      ||
 *                      ||
 *                     _||_
 *                     \  /
 *                      \/
 *            +---------------------+
 *            |                     |
 *            |  Cyclic subsystem   |<-----------  Other kernel subsystems
 *            |                     |
 *            +---------------------+
 *                   ^       |
 *                   |       |
 *                   |       |
 *                   |       v
 *            +---------------------+
 *            |                     |
 *            |   Cyclic backend    |
 *            | (platform specific) |
 *            |                     |
 *            +---------------------+
 *
 *
 *  Kernel At-Large Interfaces
 *
 *      cyclic_add()         <-- Creates a cyclic
 *      cyclic_add_omni()    <-- Creates an omnipresent cyclic
 *      cyclic_remove()      <-- Removes a cyclic
 *
 *  Backend Interfaces
 *
 *      cyclic_init()        <-- Initializes the cyclic subsystem
 *      cyclic_fire()        <-- Interrupt entry point
 *
 *  The backend-supplied interfaces (through the cyc_backend structure) are
 *  documented in detail in <sys/cyclic_impl.h>
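 *
 *  As a quick orientation (the authoritative definitions live in
 *  <sys/cyclic_impl.h>), the backend operations used in this file are:
 *
 *      cyb_configure()/cyb_unconfigure()  <-- Set up/tear down per-CPU state
 *      cyb_enable()/cyb_disable()         <-- Enable/disable the source
 *      cyb_reprogram()                    <-- Arm the source for a given time
 *      cyb_xcall()                        <-- Run a function on a given CPU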
 *
 *
 *  Cyclic Subsystem Implementation Overview
 *  ----------------------------------------
 *
 *  The cyclic subsystem is designed to minimize interference between cyclics
 *  on different CPUs.  Thus, all of the cyclic subsystem's data structures
 *  hang off of a per-CPU structure, cyc_cpu.
 *
 *  Each cyc_cpu has a power-of-two sized array of cyclic structures (the
 *  cyp_cyclics member of the cyc_cpu structure).  If cyclic_add() is called
 *  and there does not exist a free slot in the cyp_cyclics array, the size of
 *  the array will be doubled.  The array will never shrink.  Cyclics are
 *  referred to by their index in the cyp_cyclics array, which is of type
 *  cyc_index_t.
 *
 *  The cyclics are kept sorted by expiration time in the cyc_cpu's heap.  The
 *  heap is keyed by cyclic expiration time, with parents expiring earlier
 *  than their children.
 *
 *  Heap Management
 *
 *  The heap is managed primarily by cyclic_fire().  Upon entry, cyclic_fire()
 *  compares the root cyclic's expiration time to the current time.  If the
 *  expiration time is in the past, cyclic_expire() is called on the root
 *  cyclic.  Upon return from cyclic_expire(), the cyclic's new expiration time
 *  is derived by adding its interval to its old expiration time, and a
 *  downheap operation is performed.  After the downheap, cyclic_fire()
 *  examines the (potentially changed) root cyclic, repeating the
 *  cyclic_expire()/add interval/cyclic_downheap() sequence until the root
 *  cyclic has an expiration time in the future.  This expiration time
 *  (guaranteed to be the earliest in the heap) is then communicated to the
 *  backend via cyb_reprogram.  Optimal backends will next call cyclic_fire()
 *  shortly after the root cyclic's expiration time.
 *
 *  To allow efficient, deterministic downheap operations, we implement the
 *  heap as an array (the cyp_heap member of the cyc_cpu structure), with each
 *  element containing an index into the CPU's cyp_cyclics array.
 *
 *  The heap is laid out in the array according to the following:
 *
 *   1.  The root of the heap is always in the 0th element of the heap array
 *   2.  The left and right children of the nth element are element
 *       (((n + 1) << 1) - 1) and element ((n + 1) << 1), respectively.
 *
 *  This layout is standard (see, e.g., Cormen's "Algorithms"); the proof
 *  that these constraints correctly lay out a heap (or indeed, any binary
 *  tree) is trivial and left to the reader.
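 *
 *  In code, this indexing is captured by the CYC_HEAP_PARENT(),
 *  CYC_HEAP_LEFT() and CYC_HEAP_RIGHT() macros used below.  They are
 *  defined in <sys/cyclic_impl.h>; definitions along the following lines
 *  (a sketch derived from the layout above, not a verbatim copy of the
 *  header) satisfy the constraints:
 *
 *      #define CYC_HEAP_PARENT(ndx)    (((ndx) - 1) >> 1)
 *      #define CYC_HEAP_LEFT(ndx)      ((((ndx) + 1) << 1) - 1)
 *      #define CYC_HEAP_RIGHT(ndx)     (((ndx) + 1) << 1)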
 *
 *  To see the heap by example, assume our cyclics array has the following
 *  members (at time t):
 *
 *            cy_handler                          cy_expire
 *            ---------------------------------------------
 *     [ 0]   clock()                            t+10000000
 *     [ 1]   deadman()                        t+1000000000
 *     [ 2]   clock_highres_fire()                    t+100
 *     [ 3]   clock_highres_fire()                   t+1000
 *     [ 4]   clock_highres_fire()                    t+500
 *     [ 5]   (free)                                     --
 *     [ 6]   (free)                                     --
 *     [ 7]   (free)                                     --
 *
 *  The heap array could be:
 *
 *                [0]   [1]   [2]   [3]   [4]   [5]   [6]   [7]
 *              +-----+-----+-----+-----+-----+-----+-----+-----+
 *              |     |     |     |     |     |     |     |     |
 *              |  2  |  3  |  4  |  0  |  1  |  x  |  x  |  x  |
 *              |     |     |     |     |     |     |     |     |
 *              +-----+-----+-----+-----+-----+-----+-----+-----+
 *
 *  Graphically, this array corresponds to the following (excuse the ASCII art):
 *
 *                                       2
 *                                       |
 *                    +------------------+------------------+
 *                    3                                     4
 *                    |
 *          +---------+--------+
 *          0                  1
 *
 *  Note that the heap is laid out by layer:  all nodes at a given depth are
 *  stored in consecutive elements of the array.  Moreover, layers of
 *  consecutive depths are in adjacent element ranges.  This property
 *  guarantees high locality of reference during downheap operations.
 *  Specifically, we are guaranteed that we can downheap to a depth of
 *
 *      lg (cache_line_size / sizeof (cyc_index_t))
 *
 *  nodes with at most one cache miss.  On UltraSPARC (64 byte e-cache line
 *  size), this corresponds to a depth of four nodes.  Thus, if there are
 *  fewer than sixteen cyclics in the heap, downheaps on UltraSPARC miss at
 *  most once in the e-cache.
 *
 *  Downheaps are required to compare siblings as they proceed down the
 *  heap.  For downheaps proceeding beyond the one-cache-miss depth, every
 *  access to a left child could potentially miss in the cache.  However,
 *  if we assume
 *
 *      (cache_line_size / sizeof (cyc_index_t)) > 2,
 *
 *  then all siblings are guaranteed to be on the same cache line.  Thus, the
 *  miss on the left child will guarantee a hit on the right child; downheaps
 *  will incur at most one cache miss per layer beyond the one-cache-miss
 *  depth.  The total number of cache misses for heap management during a
 *  downheap operation is thus bounded by
 *
 *      lg (n) - lg (cache_line_size / sizeof (cyc_index_t))
 *
 *  Traditional pointer-based heaps are implemented without regard to
 *  locality.  Downheaps can thus incur two cache misses per layer (one for
 *  each child), but at most one cache miss at the root.  This yields a bound
 *  of
 *
 *      2 * lg (n) - 1
 *
 *  on the total cache misses.
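 *
 *  To make the bounds concrete, take the UltraSPARC figures above (64 byte
 *  lines and a one-cache-miss depth of four, implying 4 byte cyc_index_t
 *  values) and n = 1024 cyclics:  the array-based heap misses at most
 *  lg (1024) - lg (16) = 10 - 4 = 6 times per downheap, while the
 *  pointer-based bound is 2 * lg (1024) - 1 = 19 misses.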
 *
 *  This difference may seem theoretically trivial (the difference is, after
 *  all, constant), but can become substantial in practice -- especially for
 *  caches with very large cache lines and high miss penalties (e.g. TLBs).
 *
 *  Heaps must always be full, balanced trees.  Heap management must therefore
 *  track the next point-of-insertion into the heap.  In pointer-based heaps,
 *  recomputing this point takes O(lg (n)).  Given the layout of the
 *  array-based implementation, however, the next point-of-insertion is
 *  always:
 *
 *      heap[number_of_elements]
 *
 *  We exploit this property by implementing the free-list in the unused
 *  heap elements.  Heap insertion, therefore, consists only of filling in
 *  the cyclic at cyp_cyclics[cyp_heap[number_of_elements]], incrementing
 *  the number of elements, and performing an upheap.  Heap deletion consists
 *  of decrementing the number of elements, swapping the to-be-deleted element
 *  with the element at cyp_heap[number_of_elements], and downheaping.
 *
 *  Filling in more details in our earlier example:
 *
 *                                               +--- free list head
 *                                               |
 *                                               V
 *
 *                [0]   [1]   [2]   [3]   [4]   [5]   [6]   [7]
 *              +-----+-----+-----+-----+-----+-----+-----+-----+
 *              |     |     |     |     |     |     |     |     |
 *              |  2  |  3  |  4  |  0  |  1  |  5  |  6  |  7  |
 *              |     |     |     |     |     |     |     |     |
 *              +-----+-----+-----+-----+-----+-----+-----+-----+
 *
 *  To insert into this heap, we would just need to fill in the cyclic at
 *  cyp_cyclics[5], bump the number of elements (from 5 to 6) and perform
 *  an upheap.
 *
 *  If we wanted to remove, say, cyp_cyclics[3], we would first scan for it
 *  in the cyp_heap, and discover it at cyp_heap[1].  We would then decrement
 *  the number of elements (from 5 to 4), swap cyp_heap[1] with cyp_heap[4],
 *  and perform a downheap from cyp_heap[1].  The linear scan is required
 *  because the cyclic does not keep a backpointer into the heap.  This makes
 *  heap manipulation (e.g. downheaps) faster at the expense of removal
 *  operations.
 *
 *  Expiry processing
 *
 *  As alluded to above, cyclic_expire() is called by cyclic_fire() to expire
 *  a cyclic.  Cyclic subsystem consumers are guaranteed that for an arbitrary
 *  time t in the future, their cyclic handler will have been called
 *  (t - cyt_when) / cyt_interval times.  cyclic_expire() simply needs to call
 *  the handler.
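 *
 *  For example, a cyclic with cyt_when = 0 and cyt_interval = NANOSEC is
 *  guaranteed ten handler calls by t = 10 * NANOSEC, even if some of those
 *  calls arrive in a rapid burst after a stretch of blocked interrupts.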
 *
 *  Resizing
 *
 *  All of the discussion thus far has assumed a static number of cyclics.
 *  Obviously, static limitations are not practical; we need the capacity
 *  to resize our data structures dynamically.
 *
 *  We resize our data structures lazily, and only on a per-CPU basis.
 *  The size of the data structures always doubles and never shrinks.  We
 *  serialize adds (and thus resizes) on cpu_lock; we never need to deal
 *  with concurrent resizes.  Resizes should be rare; they may induce jitter
 *  on the CPU being resized, but should not affect cyclic operation on other
 *  CPUs.
 *
 *  Two key cyc_cpu data structures need to be resized:  the cyclics array
 *  and the heap array.  Resizing is relatively straightforward:
 *
 *    1.  The new, larger arrays are allocated in cyclic_expand() (called
 *        from cyclic_add()).
 *    2.  cyclic_expand() cross calls cyclic_expand_xcall() on the CPU
 *        being resized, which copies the old contents into the new arrays
 *        and updates the cyc_cpu pointers.
 *    3.  cyclic_expand() then frees the old arrays.
 *
 *  Removals
 *
 *  Cyclic removals should be rare.  To simplify the implementation (and to
 *  allow optimization for the cyclic_fire()/cyclic_expire() path), we
 *  force removals and adds to serialize on cpu_lock.
 *
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#ifdef __FreeBSD__
#include <sys/lock.h>
#include <sys/sx.h>
#endif
#include <sys/cyclic_impl.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/dtrace_bsd.h>
#ifdef __FreeBSD__
#include <machine/cpu.h>
#endif

#ifdef __NetBSD__
#include <sys/cpu.h>
#include <sys/malloc.h>
#include <sys/xcall.h>

#undef mutex_init
#define mtx_init(m, d, p, f) mutex_init(m, MUTEX_DEFAULT, IPL_HIGH)
#define mtx_lock_spin(x) mutex_spin_enter(x)
#define mtx_unlock_spin(x) mutex_spin_exit(x)
#define mtx_destroy(x) mutex_destroy(x)

#define SYSINIT(a1, a2, a3, a4, a5)
#define SYSUNINIT(a1, a2, a3, a4, a5)
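/*
 * Note that this CPU_FOREACH declares its own iterator variables (cii, ci);
 * the loop bodies below recover the CPU index with cpu_index(ci).
 */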
#define CPU_FOREACH(var) \
	CPU_INFO_ITERATOR cii; \
	struct cpu_info *ci; \
	for (CPU_INFO_FOREACH(cii, ci))
#define MAXCPU MAXCPUS
#define TRAPF_USERMODE(x) CLKF_USERMODE(x)
#define TRAPF_PC(x) CLKF_PC(x)
#endif

static kmem_cache_t *cyclic_id_cache;
static cyc_id_t *cyclic_id_head;
static cyc_backend_t cyclic_backend;

MALLOC_DEFINE(M_CYCLIC, "cyclic", "Cyclic timer subsystem");

/*
 * Returns 1 if the upheap propagated to the root, 0 if it did not.  This
 * allows the caller to reprogram the backend only when the root has been
 * modified.
 */
static int
cyclic_upheap(cyc_cpu_t *cpu, cyc_index_t ndx)
{
	cyclic_t *cyclics;
	cyc_index_t *heap;
	cyc_index_t heap_parent, heap_current = ndx;
	cyc_index_t parent, current;

	if (heap_current == 0)
		return (1);

	heap = cpu->cyp_heap;
	cyclics = cpu->cyp_cyclics;
	heap_parent = CYC_HEAP_PARENT(heap_current);

	for (;;) {
		current = heap[heap_current];
		parent = heap[heap_parent];

		/*
		 * We have an expiration time later than our parent; we're
		 * done.
		 */
		if (cyclics[current].cy_expire >= cyclics[parent].cy_expire)
			return (0);

		/*
		 * We need to swap with our parent, and continue up the heap.
		 */
		heap[heap_parent] = current;
		heap[heap_current] = parent;

		/*
		 * If we just reached the root, we're done.
		 */
		if (heap_parent == 0)
			return (1);

		heap_current = heap_parent;
		heap_parent = CYC_HEAP_PARENT(heap_current);
	}
}

static void
cyclic_downheap(cyc_cpu_t *cpu, cyc_index_t ndx)
{
	cyclic_t *cyclics = cpu->cyp_cyclics;
	cyc_index_t *heap = cpu->cyp_heap;

	cyc_index_t heap_left, heap_right, heap_me = ndx;
	cyc_index_t left, right, me;
	cyc_index_t nelems = cpu->cyp_nelems;

	for (;;) {
		/*
		 * If we don't have a left child (i.e., we're a leaf), we're
		 * done.
		 */
		if ((heap_left = CYC_HEAP_LEFT(heap_me)) >= nelems)
			return;

		left = heap[heap_left];
		me = heap[heap_me];

		heap_right = CYC_HEAP_RIGHT(heap_me);

		/*
		 * Even if we don't have a right child, we still need to compare
		 * our expiration time against that of our left child.
		 */
		if (heap_right >= nelems)
			goto comp_left;

		right = heap[heap_right];

		/*
		 * We have both a left and a right child.  We need to compare
		 * the expiration times of the children to determine which
		 * expires earlier.
		 */
		if (cyclics[right].cy_expire < cyclics[left].cy_expire) {
			/*
			 * Our right child is the earlier of our children.
			 * We'll now compare our expiration time to its; if
			 * ours is the earlier, we're done.
			 */
			if (cyclics[me].cy_expire <= cyclics[right].cy_expire)
				return;

			/*
			 * Our right child expires earlier than we do; swap
			 * with our right child, and descend right.
			 */
			heap[heap_right] = me;
			heap[heap_me] = right;
			heap_me = heap_right;
			continue;
		}

comp_left:
		/*
		 * Our left child is the earlier of our children (or we have
		 * no right child).  We'll now compare our expiration time
		 * to its; if ours is the earlier, we're done.
		 */
		if (cyclics[me].cy_expire <= cyclics[left].cy_expire)
			return;

		/*
		 * Our left child expires earlier than we do; swap with our
		 * left child, and descend left.
		 */
		heap[heap_left] = me;
		heap[heap_me] = left;
		heap_me = heap_left;
	}
}

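/*
 * cyclic_expire() is called from cyclic_fire() when the root cyclic's
 * expiration time has passed; in this simplified port it just calls the
 * consumer's handler (the cpu and ndx arguments are unused here).
 */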
static void
cyclic_expire(cyc_cpu_t *cpu, cyc_index_t ndx, cyclic_t *cyclic)
{
	cyc_func_t handler = cyclic->cy_handler;
	void *arg = cyclic->cy_arg;

	(*handler)(arg);
}

/*
 *  cyclic_fire(cpu_t *)
 *
 *  Overview
 *
 *    cyclic_fire() is the cyclic subsystem's interrupt handler.
 *    Called by the cyclic backend.
 *
 *  Arguments and notes
 *
 *    The only argument is the CPU on which the interrupt is executing;
 *    backends must call into cyclic_fire() on the specified CPU.
 *
 *    cyclic_fire() may be called spuriously without ill effect.  Optimal
 *    backends will call into cyclic_fire() at or shortly after the time
 *    requested via cyb_reprogram().  However, calling cyclic_fire()
 *    arbitrarily late will only manifest latency bubbles; the correctness
 *    of the cyclic subsystem does not rely on the timeliness of the backend.
 *
 *    cyclic_fire() is wait-free; it will not block or spin.
 *
 *  Return values
 *
 *    None.
 *
 */
static void
cyclic_fire(cpu_t *c)
{
	cyc_cpu_t *cpu = c->cpu_cyclic;
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_index_t *heap = cpu->cyp_heap;
	cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics;
	void *arg = be->cyb_arg;
	hrtime_t now = gethrtime();
	hrtime_t exp;

	if (cpu->cyp_nelems == 0) {
		/* This is a spurious fire. */
		return;
	}

	for (;;) {
		cyc_index_t ndx = heap[0];

		cyclic = &cyclics[ndx];

		ASSERT(!(cyclic->cy_flags & CYF_FREE));

		if ((exp = cyclic->cy_expire) > now)
			break;

		cyclic_expire(cpu, ndx, cyclic);

		/*
		 * If this cyclic will be set to next expire in the distant
		 * past, we have one of two situations:
		 *
		 *   a)	This is the first firing of a cyclic which had
		 *	cy_expire set to 0.
		 *
		 *   b)	We are tragically late for a cyclic -- most likely
		 *	due to being in the debugger.
		 *
		 * In either case, we set the new expiration time to be the
		 * next interval boundary.  This assures that the
		 * expiration time modulo the interval is invariant.
		 *
		 * We arbitrarily define "distant" to be one second (one second
		 * is chosen because it's shorter than any foray to the
		 * debugger while still being longer than any legitimate
		 * stretch).
		 */
		exp += cyclic->cy_interval;

		if (now - exp > NANOSEC) {
			hrtime_t interval = cyclic->cy_interval;

			exp += ((now - exp) / interval + 1) * interval;
		}

		cyclic->cy_expire = exp;
		cyclic_downheap(cpu, 0);
	}

	/*
	 * Now we have a cyclic in the root slot which isn't in the past;
	 * reprogram the interrupt source.
	 */
	be->cyb_reprogram(arg, exp);
}

static void
cyclic_expand_xcall(cyc_xcallarg_t *arg)
{
	cyc_cpu_t *cpu = arg->cyx_cpu;
	cyc_index_t new_size = arg->cyx_size, size = cpu->cyp_size, i;
	cyc_index_t *new_heap = arg->cyx_heap;
	cyclic_t *cyclics = cpu->cyp_cyclics, *new_cyclics = arg->cyx_cyclics;

	/* Disable preemption and interrupts. */
	mtx_lock_spin(&cpu->cyp_mtx);

	/*
	 * Assert that the new size is a power of 2.
	 */
	ASSERT((new_size & (new_size - 1)) == 0);
	ASSERT(new_size == (size << 1));
	ASSERT(cpu->cyp_heap != NULL && cpu->cyp_cyclics != NULL);

	bcopy(cpu->cyp_heap, new_heap, sizeof (cyc_index_t) * size);
	bcopy(cyclics, new_cyclics, sizeof (cyclic_t) * size);

	/*
	 * Set up the free list, and set all of the new cyclics to be CYF_FREE.
	 */
	for (i = size; i < new_size; i++) {
		new_heap[i] = i;
		new_cyclics[i].cy_flags = CYF_FREE;
	}

	/*
	 * We can go ahead and plow the value of cyp_heap and cyp_cyclics;
	 * cyclic_expand() has kept a copy.
	 */
	cpu->cyp_heap = new_heap;
	cpu->cyp_cyclics = new_cyclics;
	cpu->cyp_size = new_size;
	mtx_unlock_spin(&cpu->cyp_mtx);
}

/*
 * cyclic_expand() will cross call onto the CPU to perform the actual
 * expand operation.
 */
static void
cyclic_expand(cyc_cpu_t *cpu)
{
	cyc_index_t new_size, old_size;
	cyc_index_t *new_heap, *old_heap;
	cyclic_t *new_cyclics, *old_cyclics;
	cyc_xcallarg_t arg;
	cyc_backend_t *be = cpu->cyp_backend;

	ASSERT(MUTEX_HELD(&cpu_lock));

	old_heap = cpu->cyp_heap;
	old_cyclics = cpu->cyp_cyclics;

	if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0) {
		new_size = CY_DEFAULT_PERCPU;
		ASSERT(old_heap == NULL && old_cyclics == NULL);
	}

	/*
	 * Check that the new_size is a power of 2.
	 */
	ASSERT(((new_size - 1) & new_size) == 0);

	new_heap = malloc(sizeof(cyc_index_t) * new_size, M_CYCLIC, M_WAITOK);
	new_cyclics = malloc(sizeof(cyclic_t) * new_size, M_CYCLIC, M_ZERO | M_WAITOK);

	arg.cyx_cpu = cpu;
	arg.cyx_heap = new_heap;
	arg.cyx_cyclics = new_cyclics;
	arg.cyx_size = new_size;

	be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
	    (cyc_func_t)cyclic_expand_xcall, &arg);

	if (old_cyclics != NULL) {
		ASSERT(old_heap != NULL);
		ASSERT(old_size != 0);
		free(old_cyclics, M_CYCLIC);
		free(old_heap, M_CYCLIC);
	}
}

static void
cyclic_add_xcall(cyc_xcallarg_t *arg)
{
	cyc_cpu_t *cpu = arg->cyx_cpu;
	cyc_handler_t *hdlr = arg->cyx_hdlr;
	cyc_time_t *when = arg->cyx_when;
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_index_t ndx, nelems;
	cyb_arg_t bar = be->cyb_arg;
	cyclic_t *cyclic;

	ASSERT(cpu->cyp_nelems < cpu->cyp_size);

	/* Disable preemption and interrupts. */
	mtx_lock_spin(&cpu->cyp_mtx);
	nelems = cpu->cyp_nelems++;

	if (nelems == 0) {
		/*
		 * If this is the first element, we need to enable the
		 * backend on this CPU.
		 */
		be->cyb_enable(bar);
	}

	ndx = cpu->cyp_heap[nelems];
	cyclic = &cpu->cyp_cyclics[ndx];

	ASSERT(cyclic->cy_flags == CYF_FREE);
	cyclic->cy_interval = when->cyt_interval;

	if (when->cyt_when == 0) {
		/*
		 * If a start time hasn't been explicitly specified, we'll
		 * start on the next interval boundary.
		 */
		cyclic->cy_expire = (gethrtime() / cyclic->cy_interval + 1) *
		    cyclic->cy_interval;
	} else {
		cyclic->cy_expire = when->cyt_when;
	}

	cyclic->cy_handler = hdlr->cyh_func;
	cyclic->cy_arg = hdlr->cyh_arg;
	cyclic->cy_flags = arg->cyx_flags;

	if (cyclic_upheap(cpu, nelems)) {
		hrtime_t exp = cyclic->cy_expire;

		/*
		 * If our upheap propagated to the root, we need to
		 * reprogram the interrupt source.
		 */
		be->cyb_reprogram(bar, exp);
	}
	mtx_unlock_spin(&cpu->cyp_mtx);

	arg->cyx_ndx = ndx;
}

static cyc_index_t
cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr,
    cyc_time_t *when, uint16_t flags)
{
	cyc_backend_t *be = cpu->cyp_backend;
	cyb_arg_t bar = be->cyb_arg;
	cyc_xcallarg_t arg;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(!(cpu->cyp_cpu->cpu_flags & CPU_OFFLINE));
	ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);

	if (cpu->cyp_nelems == cpu->cyp_size) {
		/*
		 * This is expensive; it will cross call onto the other
		 * CPU to perform the expansion.
		 */
		cyclic_expand(cpu);
		ASSERT(cpu->cyp_nelems < cpu->cyp_size);
	}

	/*
	 * By now, we know that we're going to be able to successfully
	 * perform the add.  Now cross call over to the CPU of interest to
	 * actually add our cyclic.
	 */
	arg.cyx_cpu = cpu;
	arg.cyx_hdlr = hdlr;
	arg.cyx_when = when;
	arg.cyx_flags = flags;

	be->cyb_xcall(bar, cpu->cyp_cpu, (cyc_func_t)cyclic_add_xcall, &arg);

	return (arg.cyx_ndx);
}

static void
cyclic_remove_xcall(cyc_xcallarg_t *arg)
{
	cyc_cpu_t *cpu = arg->cyx_cpu;
	cyc_backend_t *be = cpu->cyp_backend;
	cyb_arg_t bar = be->cyb_arg;
	cyc_index_t ndx = arg->cyx_ndx, nelems = cpu->cyp_nelems, i;
	cyc_index_t *heap = cpu->cyp_heap, last;
	cyclic_t *cyclic;

	ASSERT(nelems > 0);

	/* Disable preemption and interrupts. */
	mtx_lock_spin(&cpu->cyp_mtx);
	cyclic = &cpu->cyp_cyclics[ndx];

	/*
	 * Grab the current expiration time.  If this cyclic is being
	 * removed as part of a juggling operation, the expiration time
	 * will be used when the cyclic is added to the new CPU.
	 */
	if (arg->cyx_when != NULL) {
		arg->cyx_when->cyt_when = cyclic->cy_expire;
		arg->cyx_when->cyt_interval = cyclic->cy_interval;
	}

	/*
	 * Now set the flags to CYF_FREE.  We don't need a membar_enter()
	 * between zeroing pend and setting the flags because we're at
	 * CY_HIGH_LEVEL (that is, the zeroing of pend and the setting
	 * of cy_flags appear atomic to softints).
	 */
	cyclic->cy_flags = CYF_FREE;

	for (i = 0; i < nelems; i++) {
		if (heap[i] == ndx)
			break;
	}

	if (i == nelems)
		panic("attempt to remove non-existent cyclic");

	cpu->cyp_nelems = --nelems;

	if (nelems == 0) {
		/*
		 * If we just removed the last element, then we need to
		 * disable the backend on this CPU.
		 */
		be->cyb_disable(bar);
	}

	if (i == nelems) {
		/*
		 * If we just removed the last element of the heap, then
		 * we don't have to downheap.
		 */
		goto out;
	}

	/*
	 * Swap the last element of the heap with the one we want to
	 * remove, and downheap (this has the implicit effect of putting
	 * the newly freed element on the free list).
	 */
	heap[i] = (last = heap[nelems]);
	heap[nelems] = ndx;

	if (i == 0) {
		cyclic_downheap(cpu, 0);
	} else {
		if (cyclic_upheap(cpu, i) == 0) {
			/*
			 * The upheap didn't propagate to the root; if it
			 * didn't propagate at all, we need to downheap.
			 */
			if (heap[i] == last) {
				cyclic_downheap(cpu, i);
			}
			goto out;
		}
	}

	/*
	 * We're here because we changed the root; we need to reprogram
	 * the clock source.
	 */
	cyclic = &cpu->cyp_cyclics[heap[0]];

	ASSERT(nelems != 0);
	be->cyb_reprogram(bar, cyclic->cy_expire);
out:
	mtx_unlock_spin(&cpu->cyp_mtx);
}

static int
cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait)
{
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_xcallarg_t arg;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(wait == CY_WAIT || wait == CY_NOWAIT);

	arg.cyx_ndx = ndx;
	arg.cyx_cpu = cpu;
	arg.cyx_when = when;
	arg.cyx_wait = wait;

	be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
	    (cyc_func_t)cyclic_remove_xcall, &arg);

	return (1);
}

static void
cyclic_configure(cpu_t *c)
{
	cyc_cpu_t *cpu = malloc(sizeof(cyc_cpu_t), M_CYCLIC, M_ZERO | M_WAITOK);
	cyc_backend_t *nbe = malloc(sizeof(cyc_backend_t), M_CYCLIC, M_ZERO | M_WAITOK);

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cyclic_id_cache == NULL)
		cyclic_id_cache = kmem_cache_create(__UNCONST("cyclic_id_cache"),
		    sizeof (cyc_id_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	cpu->cyp_cpu = c;

	cpu->cyp_size = 1;
	cpu->cyp_heap = malloc(sizeof(cyc_index_t), M_CYCLIC, M_ZERO | M_WAITOK);
	cpu->cyp_cyclics = malloc(sizeof(cyclic_t), M_CYCLIC, M_ZERO | M_WAITOK);
	cpu->cyp_cyclics->cy_flags = CYF_FREE;

	mtx_init(&cpu->cyp_mtx, "cyclic cpu", NULL, MTX_SPIN);

	/*
	 * Setup the backend for this CPU.
	 */
	bcopy(&cyclic_backend, nbe, sizeof (cyc_backend_t));
	if (nbe->cyb_configure != NULL)
		nbe->cyb_arg = nbe->cyb_configure(c);
	cpu->cyp_backend = nbe;

	/*
	 * On platforms where stray interrupts may be taken during startup,
	 * the CPU's cpu_cyclic pointer serves as an indicator that the
	 * cyclic subsystem for this CPU is prepared to field interrupts.
	 */
	membar_producer();

	c->cpu_cyclic = cpu;
}

static void
cyclic_unconfigure(cpu_t *c)
{
	cyc_cpu_t *cpu = c->cpu_cyclic;
	cyc_backend_t *be = cpu->cyp_backend;
	cyb_arg_t bar = be->cyb_arg;

	ASSERT(MUTEX_HELD(&cpu_lock));

	c->cpu_cyclic = NULL;

	/*
	 * Let the backend know that the CPU is being yanked, and free up
	 * the backend structure.
	 */
	if (be->cyb_unconfigure != NULL)
		be->cyb_unconfigure(bar);
	free(be, M_CYCLIC);
	cpu->cyp_backend = NULL;

	mtx_destroy(&cpu->cyp_mtx);

	/* Finally, clean up our remaining dynamic structures. */
	free(cpu->cyp_cyclics, M_CYCLIC);
	free(cpu->cyp_heap, M_CYCLIC);
	free(cpu, M_CYCLIC);
}

static void
cyclic_omni_start(cyc_id_t *idp, cyc_cpu_t *cpu)
{
	cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
	cyc_omni_cpu_t *ocpu = malloc(sizeof(cyc_omni_cpu_t), M_CYCLIC, M_WAITOK);
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(idp->cyi_cpu == NULL);

	hdlr.cyh_func = NULL;
	hdlr.cyh_arg = NULL;

	when.cyt_when = 0;
	when.cyt_interval = 0;

	omni->cyo_online(omni->cyo_arg, cpu->cyp_cpu, &hdlr, &when);

	ASSERT(hdlr.cyh_func != NULL);
	ASSERT(when.cyt_when >= 0 && when.cyt_interval > 0);

	ocpu->cyo_cpu = cpu;
	ocpu->cyo_arg = hdlr.cyh_arg;
	ocpu->cyo_ndx = cyclic_add_here(cpu, &hdlr, &when, 0);
	ocpu->cyo_next = idp->cyi_omni_list;
	idp->cyi_omni_list = ocpu;
}

static void
cyclic_omni_stop(cyc_id_t *idp, cyc_cpu_t *cpu)
{
	cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
	cyc_omni_cpu_t *ocpu = idp->cyi_omni_list, *prev = NULL;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(idp->cyi_cpu == NULL);
	ASSERT(ocpu != NULL);

	while (ocpu != NULL && ocpu->cyo_cpu != cpu) {
		prev = ocpu;
		ocpu = ocpu->cyo_next;
	}

	/*
	 * We _must_ have found a cyc_omni_cpu which corresponds to this
	 * CPU -- the definition of an omnipresent cyclic is that it runs
	 * on all online CPUs.
	 */
	ASSERT(ocpu != NULL);

	if (prev == NULL) {
		idp->cyi_omni_list = ocpu->cyo_next;
	} else {
		prev->cyo_next = ocpu->cyo_next;
	}

	(void) cyclic_remove_here(ocpu->cyo_cpu, ocpu->cyo_ndx, NULL, CY_WAIT);

	/*
	 * The cyclic has been removed from this CPU; time to call the
	 * omnipresent offline handler.
	 */
	if (omni->cyo_offline != NULL)
		omni->cyo_offline(omni->cyo_arg, cpu->cyp_cpu, ocpu->cyo_arg);

	free(ocpu, M_CYCLIC);
}

static cyc_id_t *
cyclic_new_id(void)
{
	cyc_id_t *idp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	idp = kmem_cache_alloc(cyclic_id_cache, KM_SLEEP);

	/*
	 * The cyi_cpu field of the cyc_id_t structure tracks the CPU
	 * associated with the cyclic.  If and only if this field is NULL, the
	 * cyc_id_t is an omnipresent cyclic.  Note that cyi_omni_list may be
	 * NULL for an omnipresent cyclic while the cyclic is being created
	 * or destroyed.
	 */
	idp->cyi_cpu = NULL;
	idp->cyi_ndx = 0;

	idp->cyi_next = cyclic_id_head;
	idp->cyi_prev = NULL;
	idp->cyi_omni_list = NULL;

	if (cyclic_id_head != NULL) {
		ASSERT(cyclic_id_head->cyi_prev == NULL);
		cyclic_id_head->cyi_prev = idp;
	}

	cyclic_id_head = idp;

	return (idp);
}

/*
 *  cyclic_id_t cyclic_add(cyc_handler_t *, cyc_time_t *)
 *
 *  Overview
 *
 *    cyclic_add() will create an unbound cyclic with the specified handler and
 *    interval.  The cyclic will run on a CPU which both has interrupts enabled
 *    and is in the system CPU partition.
 *
 *  Arguments and notes
 *
 *    As its first argument, cyclic_add() takes a cyc_handler, which has the
 *    following members:
 *
 *      cyc_func_t cyh_func    <-- Cyclic handler
 *      void *cyh_arg          <-- Argument to cyclic handler
 *
 *    In addition to a cyc_handler, cyclic_add() takes a cyc_time, which
 *    has the following members:
 *
 *       hrtime_t cyt_when     <-- Absolute time, in nanoseconds since boot, at
 *                                 which to start firing
 *       hrtime_t cyt_interval <-- Length of interval, in nanoseconds
 *
 *    gethrtime() is the time source for nanoseconds since boot.  If cyt_when
 *    is set to 0, the cyclic will start to fire when cyt_interval next
 *    divides the number of nanoseconds since boot.
 *
 *    The cyt_interval field _must_ be filled in by the caller; one-shots are
 *    _not_ explicitly supported by the cyclic subsystem (cyclic_add() will
 *    assert that cyt_interval is non-zero).  The maximum value for either
 *    field is INT64_MAX; the caller is responsible for assuring that
 *    cyt_when + cyt_interval <= INT64_MAX.  Neither field may be negative.
 *
 *    For an arbitrary time t in the future, the cyclic handler is guaranteed
 *    to have been called (t - cyt_when) / cyt_interval times.  This will
 *    be true even if interrupts have been disabled for periods greater than
 *    cyt_interval nanoseconds.  In order to compensate for such periods,
 *    the cyclic handler may be called a finite number of times with an
 *    arbitrarily small interval.
 *
 *    The cyclic subsystem will not enforce any lower bound on the interval;
 *    if the interval is less than the time required to process an interrupt,
 *    the CPU will wedge.  It's the responsibility of the caller to assure that
 *    either the value of the interval is sane, or that its caller has
 *    sufficient privilege to deny service (i.e. its caller is root).
 *
 *  Return value
 *
 *    cyclic_add() returns a cyclic_id_t, which is guaranteed to be a value
 *    other than CYCLIC_NONE.  cyclic_add() cannot fail.
 *
 *  Caller's context
 *
 *    cpu_lock must be held by the caller, and the caller must not be in
 *    interrupt context.  cyclic_add() will perform a KM_SLEEP kernel
 *    memory allocation, so the usual rules (e.g. p_lock cannot be held)
 *    apply.  A cyclic may be added even in the presence of CPUs that have
 *    not been configured with respect to the cyclic subsystem, but only
 *    configured CPUs will be eligible to run the new cyclic.
 *
 *  Cyclic handler's context
 *
 *    Cyclic handlers are executed in interrupt context.  (In this
 *    simplified port there is a single such context; the OpenSolaris
 *    notion of cyclic levels does not apply.)  The usual interrupt
 *    context rules apply.
 *
 *    A cyclic handler may not grab ANY locks held by the caller of any of
 *    cyclic_add() or cyclic_remove(); the implementation of these functions
 *    may require blocking on cyclic handler completion.
 *    Moreover, cyclic handlers may not make any call back into the cyclic
 *    subsystem.
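 *
 *  Usage sketch
 *
 *    A consumer might arm a 10ms cyclic as follows (my_tick() is a
 *    hypothetical handler, shown only for illustration):
 *
 *        cyc_handler_t hdlr;
 *        cyc_time_t when;
 *        cyclic_id_t id;
 *
 *        hdlr.cyh_func = my_tick;
 *        hdlr.cyh_arg = NULL;
 *        when.cyt_when = 0;              start on an interval boundary
 *        when.cyt_interval = 10000000;   10ms, in nanoseconds
 *
 *        mutex_enter(&cpu_lock);
 *        id = cyclic_add(&hdlr, &when);
 *        mutex_exit(&cpu_lock);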
 */
cyclic_id_t
cyclic_add(cyc_handler_t *hdlr, cyc_time_t *when)
{
	cyc_id_t *idp = cyclic_new_id();
	solaris_cpu_t *c = &solaris_cpu[cpu_number()];

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);

	idp->cyi_cpu = c->cpu_cyclic;
	idp->cyi_ndx = cyclic_add_here(idp->cyi_cpu, hdlr, when, 0);

	return ((uintptr_t)idp);
}

/*
 *  cyclic_id_t cyclic_add_omni(cyc_omni_handler_t *)
 *
 *  Overview
 *
 *    cyclic_add_omni() will create an omnipresent cyclic with the specified
 *    online and offline handlers.  Omnipresent cyclics run on all online
 *    CPUs, including CPUs which have unbound interrupts disabled.
 *
 *  Arguments
 *
 *    As its only argument, cyclic_add_omni() takes a cyc_omni_handler, which
 *    has the following members:
 *
 *      void (*cyo_online)()   <-- Online handler
 *      void (*cyo_offline)()  <-- Offline handler
 *      void *cyo_arg          <-- Argument to be passed to on/offline handlers
 *
 *  Online handler
 *
 *    The cyo_online member is a pointer to a function which has the following
 *    four arguments:
 *
 *      void *                 <-- Argument (cyo_arg)
 *      cpu_t *                <-- Pointer to CPU about to be onlined
 *      cyc_handler_t *        <-- Pointer to cyc_handler_t; must be filled in
 *                                 by omni online handler
 *      cyc_time_t *           <-- Pointer to cyc_time_t; must be filled in by
 *                                 omni online handler
 *
 *    The omni cyclic online handler is always called _before_ the omni
 *    cyclic begins to fire on the specified CPU.  As the above argument
 *    description implies, the online handler must fill in the two structures
 *    passed to it:  the cyc_handler_t and the cyc_time_t.  These are the
 *    same two structures passed to cyclic_add(), outlined above.  This
 *    allows the omni cyclic to have maximum flexibility; different CPUs may
 *    optionally
 *
 *      (a)  have different intervals
 *      (b)  be explicitly in or out of phase with one another
 *      (c)  have different handlers
 *      (d)  have different handler arguments
 *      (e)  fire at different levels
 *
 *    Of these, (e) seems somewhat dubious, but is nonetheless allowed.
 *
 *    The omni online handler is called in the same context as cyclic_add(),
 *    and has the same liberties:  omni online handlers may perform KM_SLEEP
 *    kernel memory allocations, and may grab locks which are also acquired
 *    by cyclic handlers.  However, omni cyclic online handlers may _not_
 *    call back into the cyclic subsystem, and should be generally careful
 *    about calling into arbitrary kernel subsystems.
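 *
 *    A minimal online handler might look like the following sketch
 *    (my_omni_tick() is hypothetical; the argument order matches the
 *    description above):
 *
 *        static void
 *        my_online(void *arg, cpu_t *c, cyc_handler_t *hdlr, cyc_time_t *w)
 *        {
 *                hdlr->cyh_func = my_omni_tick;
 *                hdlr->cyh_arg = c;
 *                w->cyt_when = 0;
 *                w->cyt_interval = NANOSEC;      once per second, per CPU
 *        }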
 *
 *  Offline handler
 *
 *    The cyo_offline member is a pointer to a function which has the following
 *    three arguments:
 *
 *      void *                 <-- Argument (cyo_arg)
 *      cpu_t *                <-- Pointer to CPU about to be offlined
 *      void *                 <-- CPU's cyclic argument (that is, value
 *                                 to which cyh_arg member of the cyc_handler_t
 *                                 was set in the omni online handler)
 *
 *    The omni cyclic offline handler is always called _after_ the omni
 *    cyclic has ceased firing on the specified CPU.  Its purpose is to
 *    allow cleanup of any resources dynamically allocated in the omni cyclic
 *    online handler.  The context of the offline handler is identical to
 *    that of the online handler; the same constraints and liberties apply.
 *
 *    The offline handler is optional; it may be NULL.
 *
 *  Return value
 *
 *    cyclic_add_omni() returns a cyclic_id_t, which is guaranteed to be a
 *    value other than CYCLIC_NONE.  cyclic_add_omni() cannot fail.
 *
 *  Caller's context
 *
 *    The caller's context is identical to that of cyclic_add(), specified
 *    above.
 */
cyclic_id_t
cyclic_add_omni(cyc_omni_handler_t *omni)
{
	cyc_id_t *idp = cyclic_new_id();
	cyc_cpu_t *cpu;
	cpu_t *c;
	int i;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(omni != NULL && omni->cyo_online != NULL);

	idp->cyi_omni_hdlr = *omni;

	CPU_FOREACH(i) {
		i = cpu_index(ci);
		c = &solaris_cpu[i];
		if ((cpu = c->cpu_cyclic) == NULL)
			continue;
		cyclic_omni_start(idp, cpu);
	}

	/*
	 * We must have found at least one online CPU on which to run
	 * this cyclic.
	 */
	ASSERT(idp->cyi_omni_list != NULL);
	ASSERT(idp->cyi_cpu == NULL);

	return ((uintptr_t)idp);
}

/*
 *  void cyclic_remove(cyclic_id_t)
 *
 *  Overview
 *
 *    cyclic_remove() will remove the specified cyclic from the system.
 *
 *  Arguments and notes
 *
 *    The only argument is a cyclic_id returned from either cyclic_add() or
 *    cyclic_add_omni().
 *
 *    By the time cyclic_remove() returns, the caller is guaranteed that the
 *    removed cyclic handler has completed execution (this is the same
 *    semantic that untimeout() provides).  As a result, cyclic_remove() may
 *    need to block, waiting for the removed cyclic to complete execution.
 *    This leads to an important constraint on the caller:  no lock may be
 *    held across cyclic_remove() that also may be acquired by a cyclic
 *    handler.
 *
 *  Return value
 *
 *    None; cyclic_remove() always succeeds.
 *
 *  Caller's context
 *
 *    cpu_lock must be held by the caller, and the caller must not be in
 *    interrupt context.  The caller may not hold any locks which are also
 *    grabbed by any cyclic handler.  See "Arguments and notes", above.
 */
void
cyclic_remove(cyclic_id_t id)
{
	cyc_id_t *idp = (cyc_id_t *)id;
	cyc_id_t *prev = idp->cyi_prev, *next = idp->cyi_next;
	cyc_cpu_t *cpu = idp->cyi_cpu;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cpu != NULL) {
		(void) cyclic_remove_here(cpu, idp->cyi_ndx, NULL, CY_WAIT);
	} else {
		ASSERT(idp->cyi_omni_list != NULL);
		while (idp->cyi_omni_list != NULL)
			cyclic_omni_stop(idp, idp->cyi_omni_list->cyo_cpu);
	}

	if (prev != NULL) {
		ASSERT(cyclic_id_head != idp);
		prev->cyi_next = next;
	} else {
		ASSERT(cyclic_id_head == idp);
		cyclic_id_head = next;
	}

	if (next != NULL)
		next->cyi_prev = prev;

	kmem_cache_free(cyclic_id_cache, idp);
}

static void
cyclic_init(cyc_backend_t *be)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Copy the passed cyc_backend into the backend template.  This must
	 * be done before the CPU can be configured.
	 */
	bcopy(be, &cyclic_backend, sizeof (cyc_backend_t));

	cyclic_configure(&solaris_cpu[cpu_number()]);
}

/*
 * It is assumed that cyclic_mp_init() is called some time after cyclic
 * init (and therefore, after cpu0 has been initialized).  We grab cpu_lock,
 * find the already initialized CPU, and initialize every other CPU with the
 * same backend.
 */
static void
cyclic_mp_init(void)
{
	cpu_t *c;
	int i;

#ifndef __NetBSD__
	mutex_enter(&cpu_lock);
#endif

	CPU_FOREACH(i) {
		i = cpu_index(ci);
		c = &solaris_cpu[i];
		if (c->cpu_cyclic == NULL)
			cyclic_configure(c);
	}

#ifndef __NetBSD__
	mutex_exit(&cpu_lock);
#endif
}

static void
cyclic_uninit(void)
{
	cpu_t *c;
	int id;

	CPU_FOREACH(id) {
		id = cpu_index(ci);
		c = &solaris_cpu[id];
		if (c->cpu_cyclic == NULL)
			continue;
		cyclic_unconfigure(c);
	}

	if (cyclic_id_cache != NULL)
		kmem_cache_destroy(cyclic_id_cache);
}

#include "cyclic_machdep.c"

/*
 *  Cyclic subsystem initialisation.
 */
static void
cyclic_load(void *dummy)
{
	mutex_enter(&cpu_lock);

	/* Initialise the machine-dependent backend. */
	cyclic_machdep_init();

	mutex_exit(&cpu_lock);
}

SYSINIT(cyclic_register, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_load, NULL);

static void
cyclic_unload(void)
{
	mutex_enter(&cpu_lock);

	/* Uninitialise the machine-dependent backend. */
	cyclic_machdep_uninit();

	mutex_exit(&cpu_lock);
}

SYSUNINIT(cyclic_unregister, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_unload, NULL);

#ifdef __FreeBSD__
/* ARGSUSED */
static int
cyclic_modevent(module_t mod __unused, int type, void *data __unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		break;

	case MOD_UNLOAD:
		break;

	case MOD_SHUTDOWN:
		break;

	default:
		error = EOPNOTSUPP;
		break;

	}
	return (error);
}

DEV_MODULE(cyclic, cyclic_modevent, NULL);
MODULE_VERSION(cyclic, 1);
MODULE_DEPEND(cyclic, opensolaris, 1, 1, 1);
#endif

#ifdef __NetBSD__
static int
cyclic_modcmd(modcmd_t cmd, void *data)
{
	switch (cmd) {
	case MODULE_CMD_INIT:
		cyclic_load(NULL);
		return 0;

	case MODULE_CMD_FINI:
		cyclic_unload();
		return 0;

	case MODULE_CMD_AUTOUNLOAD:
		if (cyclic_id_head != NULL)
			return EBUSY;
		return 0;

	default:
		return ENOTTY;
	}
}

MODULE(MODULE_CLASS_MISC, cyclic, "solaris");
#endif