Lines Matching defs:callout
45 #include <sys/callout.h>
77 SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *");
78 SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *");
134 * The callout cpu exec entities represent information necessary for
136 * necessary for migrating callouts to the new callout cpu. In particular,
137 * the first entry of the array cc_exec_entity holds information for callout
139 * for callout running directly from hardware interrupt context.
141 * the migrating callout is already running.
144 struct callout *cc_curr;
160 * state for the callout processing thread on the individual CPU.
165 struct callout *cc_next;
208 static void softclock_call_cc(struct callout *c, struct callout_cpu *cc,
214 static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
218 * cc_curr - If a callout is in progress, it is cc_curr.
221 * relevant callout completes.
223 * guarantees that the current callout will not run.
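Taken together, the comment fragments above (source lines 134-141 and 218-223) describe the per-CPU bookkeeping used to track a callout that is currently executing and to defer its migration until it finishes. A rough illustrative sketch of that layout follows; only cc_curr, cc_next, cc_lock and cc_exec_entity appear verbatim in the matches, the remaining field names are approximations and nothing here is the verbatim kernel definition.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

/*
 * Illustrative sketch only; the real definitions live in kern_timeout.c.
 * One cc_exec entry exists for the softclock SWI thread and one for
 * callouts run directly from hardware interrupt context.
 */
struct cc_exec {
	struct callout	*cc_curr;	/* callout currently executing, if any */
	bool		cc_cancel;	/* when set, the current callout will not run */
	bool		cc_waiting;	/* a callout_drain() caller sleeps until cc_curr finishes */
	/* ...deferred-migration state (target cpu, time, function, argument) on SMP... */
};

struct callout_cpu {
	struct mtx	cc_lock;		/* protects this CPU's callout state */
	struct cc_exec	cc_exec_entity[2];	/* [0] SWI thread, [1] direct from hw interrupt */
	struct callout	*cc_next;		/* next callout to be serviced */
	/* ...callout wheel, preallocated callouts, eventtimers(4) bookkeeping... */
};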
234 * Resets the execution entity tied to a specific callout cpu.
253 * Checks if migration is requested by a specific callout cpu.
277 * Calculate the size of the callout wheel and the preallocated
279 * XXX: Clip callout to result of previous function of maxusers
286 * Calculate the callout wheel size; it should be the next power of two higher
299 * Initialize callout wheels. The software interrupt threads
311 * Initialize the per-cpu callout structures.
318 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN);
336 * Switches the cpu tied to a specific callout.
337 * The function expects a locked incoming callout cpu and returns with
338 * the outgoing callout cpu locked.
341 callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
349 * Avoid interrupts and preemption firing after the callout cpu
351 * may be willing to acquire the callout cpu lock.
431 struct callout *c, *next;
477 /* Run the callout if the present time is within the allowed window. */
480 * Consumer told us the callout may be run
561 callout_lock(struct callout *c)
585 callout_cc_add(struct callout *c, struct callout_cpu *cc,
612 * Inform the eventtimers(4) subsystem there's a new callout
625 softclock_call_cc(struct callout *c, struct callout_cpu *cc,
691 * The callout may have been cancelled
699 /* The callout cannot be stopped now. */
705 CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
711 CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
719 CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
722 KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
739 "Expensive callout(9) function: %p(%p) %jd.%09ld s\n",
746 KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
747 CTR1(KTR_CALLOUT, "callout %p finished", c);
757 * callout to complete.
758 * If the callout was scheduled for
765 * It should be asserted here that the callout is not
777 * If the callout was scheduled for
788 * It should be asserted here that the callout is not destroyed
791 * As a first step, handle deferred callout stops.
814 * The callout mechanism is based on the work of Adam M. Costello and
834 struct callout *c;
923 * New interface; clients allocate their own callout structures.
927 * callout_init() - initialize a callout structure so that it can
930 * <sys/callout.h> defines three convenience macros:
932 * callout_active() - returns truth if callout has not been stopped,
933 * drained, or deactivated since the last time the callout was
935 * callout_pending() - returns truth if callout is still waiting for timeout
936 * callout_deactivate() - marks the callout as having been serviced
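The block above summarizes the consumer-facing callout(9) interface. Below is a minimal usage sketch following the callout(9) manual page rather than anything in this file; the softc, its fields, and the my_* functions are hypothetical names.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

/* Hypothetical consumer state; field names are illustrative. */
struct my_softc {
	struct mtx	mc_mtx;
	struct callout	mc_callout;
	bool		mc_stopping;
};

static void
my_timeout(void *arg)
{
	struct my_softc *sc = arg;

	/* Runs with mc_mtx held because of callout_init_mtx() below. */
	if (sc->mc_stopping) {
		callout_deactivate(&sc->mc_callout);	/* serviced, not rearmed */
		return;
	}
	/* ...periodic work... then rearm one second out. */
	callout_reset(&sc->mc_callout, hz, my_timeout, sc);
}

static void
my_start(struct my_softc *sc)
{
	mtx_init(&sc->mc_mtx, "my_softc", NULL, MTX_DEF);
	callout_init_mtx(&sc->mc_callout, &sc->mc_mtx, 0);
	mtx_lock(&sc->mc_mtx);
	callout_reset(&sc->mc_callout, hz, my_timeout, sc);
	mtx_unlock(&sc->mc_mtx);
}

static void
my_stop(struct my_softc *sc)
{
	mtx_lock(&sc->mc_mtx);
	sc->mc_stopping = true;
	callout_stop(&sc->mc_callout);		/* cancel a pending, not-yet-running callout */
	mtx_unlock(&sc->mc_mtx);
	callout_drain(&sc->mc_callout);		/* may sleep: waits out a handler already running */
	mtx_destroy(&sc->mc_mtx);
}

callout_drain() can sleep, so it is issued only after dropping mc_mtx; callout_stop() alone removes a pending invocation but does not wait for one that has already started.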
939 callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
961 ("%s: direct callout %p has non-spin lock", __func__, c));
971 * We're being asked to reschedule a callout which is
973 * can cancel the callout if it has not really started.
980 * callout. Don't reschedule.
1023 * If the callout must migrate, try to perform it immediately.
1024 * If the callout is currently running, just defer the migration
1031 * actually executing the callout on another
1032 * CPU. That callout should be waiting on the
1035 * lock on the executing callout proceeds, it
1037 * At the return from the actual callout execution
1039 * and this new callout will be placed on the
1076 callout_schedule_on(struct callout *c, int to_ticks, int cpu)
1082 callout_schedule(struct callout *c, int to_ticks)
1088 _callout_stop_safe(struct callout *c, int flags)
1128 * were migrating *as* the callout is about to
1130 * the callout wants.
1135 * on one yet). When the callout wheel runs,
1136 * it will ignore this callout.
1146 * If the callout was migrating while the callout cpu lock was
1163 * If the callout is running, try to stop it or drain it.
1169 * draining and the callout is currently executing, first wait
1177 * The current callout is running (or just
1236 * The current callout is waiting for its
1237 * lock which we hold. Cancel the callout
1239 * lock, the callout will be skipped in
1247 ("callout wrongly scheduled for migration"));
1263 * The callout is currently being serviced
1264 * and the "next" callout is scheduled at
1276 * reschedule of the callout when the
1305 * For a callout that is neither scheduled nor executing, return
1333 callout_init(struct callout *c, int mpsafe)
1347 _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
1354 ("%s: callout %p has sleepable lock", __func__, c));
1360 *c = (struct callout){
1385 struct callout *tmp;
1479 _show_callout(struct callout *c)
1482 db_printf("callout %p\n", c);
1496 DB_SHOW_COMMAND(callout, db_show_callout)
1500 db_printf("usage: show callout <struct callout *>\n");
1504 _show_callout((struct callout *)addr);
1516 db_printf("cpu %d last%s callout function: %p ", cpu, dirstr, func);
1518 db_printf("\ncpu %d last%s callout argument: %p\n", cpu, dirstr, arg);