Lines Matching defs:cc
179 #define cc_exec_curr(cc, dir) cc->cc_exec_entity[dir].cc_curr
180 #define cc_exec_last_func(cc, dir) cc->cc_exec_entity[dir].cc_last_func
181 #define cc_exec_last_arg(cc, dir) cc->cc_exec_entity[dir].cc_last_arg
182 #define cc_exec_next(cc) cc->cc_next
183 #define cc_exec_cancel(cc, dir) cc->cc_exec_entity[dir].cc_cancel
184 #define cc_exec_waiting(cc, dir) cc->cc_exec_entity[dir].cc_waiting
186 #define cc_migration_func(cc, dir) cc->cc_exec_entity[dir].ce_migration_func
187 #define cc_migration_arg(cc, dir) cc->cc_exec_entity[dir].ce_migration_arg
188 #define cc_migration_cpu(cc, dir) cc->cc_exec_entity[dir].ce_migration_cpu
189 #define cc_migration_time(cc, dir) cc->cc_exec_entity[dir].ce_migration_time
190 #define cc_migration_prec(cc, dir) cc->cc_exec_entity[dir].ce_migration_prec
201 #define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock)
202 #define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock)
203 #define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED)
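The cc_exec_*() and cc_migration_*() accessors above name the per-direction execution state kept in cc_exec_entity[], and CC_LOCK()/CC_UNLOCK()/CC_LOCK_ASSERT() wrap the per-CPU spin mutex that protects it. A minimal usage sketch assembled from lines later in this listing (cc, c, and direct are assumed to be a struct callout_cpu pointer, a struct callout pointer, and the 0/1 direct-dispatch index; this is an illustration, not a verbatim excerpt):

    /* Publish the callout that is about to run, under the per-CPU spin lock. */
    CC_LOCK(cc);                            /* mtx_lock_spin(&cc->cc_lock) */
    cc_exec_curr(cc, direct) = c;           /* cc->cc_exec_entity[direct].cc_curr = c */
    cc_exec_cancel(cc, direct) = false;     /* clear any stale cancellation request */
    CC_UNLOCK(cc);                          /* mtx_unlock_spin(&cc->cc_lock) */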
207 static void callout_cpu_init(struct callout_cpu *cc, int cpu);
208 static void softclock_call_cc(struct callout *c, struct callout_cpu *cc,
237 cc_cce_cleanup(struct callout_cpu *cc, int direct)
240 cc_exec_curr(cc, direct) = NULL;
241 cc_exec_cancel(cc, direct) = false;
242 cc_exec_waiting(cc, direct) = false;
244 cc_migration_cpu(cc, direct) = CPUBLOCK;
245 cc_migration_time(cc, direct) = 0;
246 cc_migration_prec(cc, direct) = 0;
247 cc_migration_func(cc, direct) = NULL;
248 cc_migration_arg(cc, direct) = NULL;
256 cc_cce_migrating(struct callout_cpu *cc, int direct)
260 return (cc_migration_cpu(cc, direct) != CPUBLOCK);
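The cc_cce_cleanup() and cc_cce_migrating() fragments above reset and query one execution entity. A sketch of how the listed lines plausibly fit together; the SMP guards, the bool return type, and the omitted declarations are assumptions, since the listing hides the surrounding lines:

    /* Reset one cc_exec_entity slot to its idle state. */
    static void
    cc_cce_cleanup(struct callout_cpu *cc, int direct)
    {
        cc_exec_curr(cc, direct) = NULL;
        cc_exec_cancel(cc, direct) = false;
        cc_exec_waiting(cc, direct) = false;
    #ifdef SMP
        cc_migration_cpu(cc, direct) = CPUBLOCK;    /* CPUBLOCK: no migration pending */
        cc_migration_time(cc, direct) = 0;
        cc_migration_prec(cc, direct) = 0;
        cc_migration_func(cc, direct) = NULL;
        cc_migration_arg(cc, direct) = NULL;
    #endif
    }

    /* A migration is pending iff a real target CPU has been recorded. */
    static bool
    cc_cce_migrating(struct callout_cpu *cc, int direct)
    {
    #ifdef SMP
        return (cc_migration_cpu(cc, direct) != CPUBLOCK);
    #else
        return (false);
    #endif
    }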
273 struct callout_cpu *cc;
304 cc = CC_CPU(cpu);
305 callout_cpu_init(cc, cpu);
314 callout_cpu_init(struct callout_cpu *cc, int cpu)
318 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN);
319 cc->cc_callwheel = malloc_domainset(sizeof(struct callout_list) *
323 LIST_INIT(&cc->cc_callwheel[i]);
324 TAILQ_INIT(&cc->cc_expireq);
325 cc->cc_firstevent = SBT_MAX;
327 cc_cce_cleanup(cc, i);
329 snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
341 callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
345 MPASS(c != NULL && cc != NULL);
346 CC_LOCK_ASSERT(cc);
355 CC_UNLOCK(cc);
372 struct callout_cpu *cc;
378 cc = CC_CPU(cpu);
379 error = kproc_kthread_add(softclock_thread, cc, &p, &td,
384 CC_LOCK(cc);
385 cc->cc_thread = td;
390 thread_lock_set(td, (struct mtx *)&cc->cc_lock);
427 struct callout_cpu *cc;
432 struct callout_cpu *cc;
441 cc = CC_SELF();
442 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
445 firstb = callout_hash(cc->cc_lastscan);
446 cc->cc_lastscan = now;
475 sc = &cc->cc_callwheel[firstb & callwheelmask];
487 cc_exec_next(cc) = next;
488 cc->cc_bucket = firstb & callwheelmask;
490 softclock_call_cc(c, cc,
495 next = cc_exec_next(cc);
496 cc_exec_next(cc) = NULL;
499 TAILQ_INSERT_TAIL(&cc->cc_expireq,
534 cc->cc_firstevent = last;
542 if (!TAILQ_EMPTY(&cc->cc_expireq)) {
543 entropy.cc = cc;
548 td = cc->cc_thread;
555 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
557 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
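The callout_process() fragments above show the hardclock-side scan: the wheel buckets between the last scan time and now are walked under cc_lock (taken with MTX_QUIET), due entries are either run immediately or handed to the softclock thread, and cc_firstevent/cc_lastscan are updated along the way. A sketch of the per-entry dispatch decision; the CALLOUT_DIRECT flag test, the c_iflags field name, and the abbreviated softclock_call_cc() argument list are assumptions, as the listing only shows the two destinations:

    /* Direct callouts run right here in interrupt context; everything else is
     * queued for the per-CPU softclock thread to execute later. */
    if (c->c_iflags & CALLOUT_DIRECT) {
        softclock_call_cc(c, cc, /* direct */ 1);
    } else {
        TAILQ_INSERT_TAIL(&cc->cc_expireq, c, c_links.tqe);
    }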
563 struct callout_cpu *cc;
575 cc = CC_CPU(cpu);
576 CC_LOCK(cc);
579 CC_UNLOCK(cc);
581 return (cc);
585 callout_cc_add(struct callout *c, struct callout_cpu *cc,
591 CC_LOCK_ASSERT(cc);
592 if (sbt < cc->cc_lastscan)
593 sbt = cc->cc_lastscan;
607 LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
608 if (cc->cc_bucket == bucket)
609 cc_exec_next(cc) = c;
618 if (sbt < cc->cc_firstevent) {
619 cc->cc_firstevent = sbt;
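The callout_cc_add() lines above clamp the requested time to the last wheel scan, link the callout into its hashed bucket, keep the in-progress scan's iterator valid, and track the earliest pending event on this CPU. A condensed sketch; get_bucket() is a hypothetical stand-in for the hash step, which the listing does not show:

    CC_LOCK_ASSERT(cc);
    if (sbt < cc->cc_lastscan)
        sbt = cc->cc_lastscan;                      /* never schedule into the past */
    bucket = get_bucket(sbt);                       /* hypothetical: hash sbt into the callwheel */
    LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
    if (cc->cc_bucket == bucket)
        cc_exec_next(cc) = c;                       /* the active scan may pick this entry up */
    if (sbt < cc->cc_firstevent)
        cc->cc_firstevent = sbt;                    /* earliest pending event on this CPU */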
625 softclock_call_cc(struct callout *c, struct callout_cpu *cc,
652 CC_LOCK_ASSERT(cc);
671 cc_exec_curr(cc, direct) = c;
672 cc_exec_last_func(cc, direct) = c_func;
673 cc_exec_last_arg(cc, direct) = c_arg;
674 cc_exec_cancel(cc, direct) = false;
679 cc_exec_curr(cc, direct) = NULL;
680 callout_cc_add(c, cc,
681 cc->cc_lastscan + c->c_precision / 2,
686 CC_UNLOCK(cc);
688 CC_UNLOCK(cc);
694 if (cc_exec_cancel(cc, direct)) {
700 cc_exec_cancel(cc, direct) = true;
715 CC_UNLOCK(cc);
722 KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
746 KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
751 CC_LOCK(cc);
752 KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
753 cc_exec_curr(cc, direct) = NULL;
754 if (cc_exec_waiting(cc, direct)) {
761 if (cc_cce_migrating(cc, direct)) {
762 cc_cce_cleanup(cc, direct);
770 cc_exec_waiting(cc, direct) = false;
771 CC_UNLOCK(cc);
772 wakeup(&cc_exec_waiting(cc, direct));
773 CC_LOCK(cc);
774 } else if (cc_cce_migrating(cc, direct)) {
780 new_cpu = cc_migration_cpu(cc, direct);
781 new_time = cc_migration_time(cc, direct);
782 new_prec = cc_migration_prec(cc, direct);
783 new_func = cc_migration_func(cc, direct);
784 new_arg = cc_migration_arg(cc, direct);
785 cc_cce_cleanup(cc, direct);
801 new_cc = callout_cpu_switch(c, cc, new_cpu);
806 CC_LOCK(cc);
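The softclock_call_cc() fragments above show the core execution handshake: the running callout is published, cc_lock is dropped while the handler runs, and after relocking the waiting and migration state is resolved. A compressed sketch of that sequence; KTR tracing, statistics, and the body of the migration branch are elided, and c_func/c_arg are assumed to have been copied out of *c while the lock was still held:

    cc_exec_curr(cc, direct) = c;              /* publish the running callout */
    cc_exec_cancel(cc, direct) = false;
    CC_UNLOCK(cc);                             /* the handler runs without cc_lock held */
    c_func(c_arg);
    CC_LOCK(cc);
    KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
    cc_exec_curr(cc, direct) = NULL;
    if (cc_exec_waiting(cc, direct)) {
        /* A callout_drain() is sleeping on this flag: clear it and wake the sleeper. */
        cc_exec_waiting(cc, direct) = false;
        CC_UNLOCK(cc);
        wakeup(&cc_exec_waiting(cc, direct));
        CC_LOCK(cc);
    } else if (cc_cce_migrating(cc, direct)) {
        /* A deferred callout_reset_on() recorded a new CPU/time/func/arg:
         * read them back, clear the slot, and reschedule on the target CPU. */
        cc_cce_cleanup(cc, direct);
    }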
833 struct callout_cpu *cc;
839 cc = (struct callout_cpu *)arg;
840 CC_LOCK(cc);
842 while (TAILQ_EMPTY(&cc->cc_expireq)) {
844 * Use CC_LOCK(cc) as the thread_lock while
848 thread_lock_set(td, (struct mtx *)&cc->cc_lock);
853 CC_LOCK(cc);
859 while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
860 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
861 softclock_call_cc(c, cc,
943 struct callout_cpu *cc;
963 cc = callout_lock(c);
969 if (cc_exec_curr(cc, direct) == c) {
975 if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
976 cancelled = cc_exec_cancel(cc, direct) = true;
977 if (cc_exec_waiting(cc, direct)) {
985 CC_UNLOCK(cc);
997 cc_migration_cpu(cc, direct) = cpu;
998 cc_migration_time(cc, direct) = to_sbt;
999 cc_migration_prec(cc, direct) = precision;
1000 cc_migration_func(cc, direct) = ftn;
1001 cc_migration_arg(cc, direct) = arg;
1003 CC_UNLOCK(cc);
1010 if (cc_exec_next(cc) == c)
1011 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
1014 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1028 if (cc_exec_curr(cc, direct) == c) {
1045 cc_migration_cpu(cc, direct) = cpu;
1046 cc_migration_time(cc, direct) = to_sbt;
1047 cc_migration_prec(cc, direct) = precision;
1048 cc_migration_func(cc, direct) = ftn;
1049 cc_migration_arg(cc, direct) = arg;
1056 CC_UNLOCK(cc);
1059 cc = callout_cpu_switch(c, cc, cpu);
1063 callout_cc_add(c, cc, to_sbt, precision, ftn, arg, flags);
1067 CC_UNLOCK(cc);
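In the callout_reset path above, a callout whose handler is still executing cannot simply be relinked; lines 1045-1049 instead park the new CPU, time, precision, function, and argument in the migration fields so softclock_call_cc() can apply them once the handler returns. A sketch of that branch, with the preceding cancellation logic and the non-migration cases elided:

    if (cc_exec_curr(cc, direct) == c) {
        /* The handler is running right now: record where and when the callout
         * should fire next and let softclock_call_cc() perform the move. */
        cc_migration_cpu(cc, direct) = cpu;
        cc_migration_time(cc, direct) = to_sbt;
        cc_migration_prec(cc, direct) = precision;
        cc_migration_func(cc, direct) = ftn;
        cc_migration_arg(cc, direct) = arg;
        CC_UNLOCK(cc);
        return (cancelled);     /* 'cancelled' was decided earlier in the function */
    }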
1090 struct callout_cpu *cc, *old_cc;
1121 cc = callout_lock(c);
1150 if (sq_locked != 0 && cc != old_cc) {
1152 CC_UNLOCK(cc);
1165 if (cc_exec_curr(cc, direct) == c) {
1182 if (cc_exec_curr(cc, direct) == c) {
1201 CC_UNLOCK(cc);
1203 &cc_exec_waiting(cc, direct));
1205 old_cc = cc;
1215 cc_exec_waiting(cc, direct) = true;
1217 CC_UNLOCK(cc);
1219 &cc_exec_waiting(cc, direct),
1220 &cc->cc_lock.lock_object, "codrain",
1223 &cc_exec_waiting(cc, direct),
1233 } else if (use_lock && !cc_exec_cancel(cc, direct)) {
1243 cc_exec_cancel(cc, direct) = true;
1246 KASSERT(!cc_cce_migrating(cc, direct),
1251 cc_migration_cpu(cc, direct) = CPUBLOCK;
1252 cc_migration_time(cc, direct) = 0;
1253 cc_migration_prec(cc, direct) = 0;
1254 cc_migration_func(cc, direct) = NULL;
1255 cc_migration_arg(cc, direct) = NULL;
1258 CC_UNLOCK(cc);
1279 cc_migration_cpu(cc, direct) = CPUBLOCK;
1280 cc_migration_time(cc, direct) = 0;
1281 cc_migration_prec(cc, direct) = 0;
1282 cc_migration_func(cc, direct) = NULL;
1283 cc_migration_arg(cc, direct) = NULL;
1287 CC_UNLOCK(cc);
1299 sleepq_release(&cc_exec_waiting(cc, direct));
1308 if (cc_exec_curr(cc, direct) != c)
1310 CC_UNLOCK(cc);
1321 if (cc_exec_next(cc) == c)
1322 cc_exec_next(cc) = LIST_NEXT(c, c_links.le);
1325 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
1328 CC_UNLOCK(cc);
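The stop/drain fragments above (around lines 1165-1299) show how a draining thread waits for a handler that is currently running: it sets the per-direction waiting flag and sleeps on that flag's address until softclock_call_cc() clears it and calls wakeup(). A sketch of that wait; the lock-ordering dance with sq_locked visible at lines 1150-1162 is simplified here, and the exact argument values are assumed:

    /* Wait for the in-progress handler on this CPU to finish. */
    sleepq_lock(&cc_exec_waiting(cc, direct));
    cc_exec_waiting(cc, direct) = true;
    DROP_GIANT();
    CC_UNLOCK(cc);
    sleepq_add(&cc_exec_waiting(cc, direct),
        &cc->cc_lock.lock_object, "codrain", SLEEPQ_SLEEP, 0);
    sleepq_wait(&cc_exec_waiting(cc, direct), 0);
    PICKUP_GIANT();
    CC_LOCK(cc);                /* reacquire before re-checking the callout state */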
1386 struct callout_cpu *cc;
1404 cc = CC_CPU(cpu);
1405 CC_LOCK(cc);
1407 sc = &cc->cc_callwheel[i];
1428 CC_UNLOCK(cc);
1510 struct callout_cpu *cc;
1513 cc = CC_CPU(cpu);
1514 func = cc_exec_last_func(cc, direct);
1515 arg = cc_exec_last_arg(cc, direct);
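Finally, lines 1513-1515 read back the function and argument of the most recently executed callout through cc_exec_last_func()/cc_exec_last_arg(). A hedged sketch of a diagnostic helper built on those accessors; the helper name, its signature, and the printing are illustrative only and not part of the source:

    /* Hypothetical helper: report the last callout handler that ran on a CPU. */
    static void
    report_last_callout(int cpu, int direct)
    {
        struct callout_cpu *cc;
        void (*func)(void *);
        void *arg;

        cc = CC_CPU(cpu);
        func = cc_exec_last_func(cc, direct);
        arg = cc_exec_last_arg(cc, direct);
        printf("cpu %d %s callout: last func %p, last arg %p\n",
            cpu, direct ? "direct" : "threaded", (void *)func, arg);
    }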