Lines matching: needs-reset-on-resume
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * Implementation of sleep queues used to hold a queue of threads blocked on
43 * a free list of the sleep queues of other threads blocked on the same
47 * ability to set a timeout. The timeout is managed using a per-thread
55 * pre-existing abuse of that API. The same lock must also be held when
101 #define SC_MASK (SC_TABLESIZE - 1)
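For context, SC_MASK above is part of the wait-channel hashing that the SC_LOOKUP calls further down rely on. A minimal sketch of how a wait channel is assumed to map to a sleepqueue chain; the table size, shift, and hash formula here are assumptions reconstructed around the one definition visible in this listing, not copied from it:

/*
 * Sketch of the wait-channel hash.  Each bucket is a sleepqueue chain
 * with its own spin lock.
 */
#define SC_TABLESIZE    256                     /* assumed power of two */
#define SC_MASK         (SC_TABLESIZE - 1)
#define SC_SHIFT        8                       /* assumed */
#define SC_HASH(wc)     ((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & SC_MASK)
#define SC_LOOKUP(wc)   (&sleepq_chains[SC_HASH(wc)])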
110 * that a sleep queue is on when it is attached to a wait channel. The
115 * list of threads blocked on that wait channel, flags specific to the
123 * c - sleep queue chain lock
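The "(c)" annotation above marks fields protected by the sleep queue chain lock. A hedged reconstruction of the sleepqueue structure, inferred from the fields used throughout the fragments below (sq_blocked, sq_blockedcnt, sq_hash, sq_free, sq_wchan, sq_type, sq_lock) rather than copied from the declaration:

struct sleepqueue {
        TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];    /* (c) blocked threads, one tail queue per sub-queue */
        u_int           sq_blockedcnt[NR_SLEEPQS];      /* (c) count of blocked threads per sub-queue */
        LIST_ENTRY(sleepqueue) sq_hash;                 /* (c) chain / free-list linkage */
        LIST_HEAD(, sleepqueue) sq_free;                /* (c) spare queues lent by other sleepers */
        const void      *sq_wchan;                      /* (c) wait channel */
        int             sq_type;                        /* (c) SLEEPQ_TYPE bits from sleepq_add() */
#ifdef INVARIANTS
        struct lock_object *sq_lock;                    /* (c) interlock passed to sleepq_add() */
#endif
};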
163 * Prototypes for non-exported routines.
265 mtx_lock_spin(&sc->sc_lock);
281 mtx_assert(&sc->sc_lock, MA_OWNED);
282 LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
283 if (sq->sq_wchan == wchan)
297 mtx_unlock_spin(&sc->sc_lock);
301 * Places the current thread on the sleep queue for the specified wait
316 mtx_assert(&sc->sc_lock, MA_OWNED);
317 MPASS(td->td_sleepqueue != NULL);
327 ("%s: td %p to sleep on wchan %p with sleeping prohibited",
343 sq = td->td_sleepqueue;
345 KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
347 KASSERT(sq->sq_blockedcnt[i] == 0,
350 KASSERT(LIST_EMPTY(&sq->sq_free),
351 ("thread's sleep queue has a non-empty free list"));
352 KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
353 sq->sq_lock = lock;
356 sc->sc_depth++;
357 if (sc->sc_depth > sc->sc_max_depth) {
358 sc->sc_max_depth = sc->sc_depth;
359 if (sc->sc_max_depth > sleepq_max_depth)
360 sleepq_max_depth = sc->sc_max_depth;
363 sq = td->td_sleepqueue;
364 LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
365 sq->sq_wchan = wchan;
366 sq->sq_type = flags & SLEEPQ_TYPE;
368 MPASS(wchan == sq->sq_wchan);
369 MPASS(lock == sq->sq_lock);
370 MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
371 LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
374 TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
375 sq->sq_blockedcnt[queue]++;
376 td->td_sleepqueue = NULL;
377 td->td_sqqueue = queue;
378 td->td_wchan = wchan;
379 td->td_wmesg = wmesg;
381 td->td_intrval = 0;
382 td->td_flags |= TDF_SINTR;
384 td->td_flags &= ~TDF_TIMEOUT;
403 mtx_assert(&sc->sc_lock, MA_OWNED);
405 MPASS(td->td_sleepqueue == NULL);
409 KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
410 td->td_tid, td, (uintmax_t)td->td_sleeptimo));
412 callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
414 callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
432 return (sq->sq_blockedcnt[queue]);
441 mtx_assert(&sc->sc_lock, MA_OWNED);
443 if ((td->td_pflags & TDP_WAKEUP) != 0) {
444 td->td_pflags &= ~TDP_WAKEUP;
458 mtx_unlock_spin(&sc->sc_lock);
460 p = td->td_proc;
462 (void *)td, (long)p->p_pid, td->td_name);
473 mtx_lock_spin(&sc->sc_lock);
481 * Lock the per-process spinlock prior to dropping the
487 mtx_lock_spin(&sc->sc_lock);
510 mtx_assert(&sc->sc_lock, MA_OWNED);
516 mtx_assert(&sc->sc_lock, MA_OWNED);
527 * on the sleep queue, remove it from the sleep queue.
533 MPASS(td->td_lock != &sc->sc_lock);
534 mtx_unlock_spin(&sc->sc_lock);
541 * Switches to another thread if we are still asleep on a sleep queue.
543 * The thread lock is required on entry and is no longer held on return.
555 mtx_assert(&sc->sc_lock, MA_OWNED);
562 if (td->td_sleepqueue != NULL) {
563 mtx_unlock_spin(&sc->sc_lock);
570 * already but we are still on the sleep queue, so dequeue the
573 * Do the same if the real-time clock has been adjusted since this
574 * thread calculated its timeout based on that clock. This handles
576 * - The Ts thread needs to sleep until an absolute real-clock time.
577 * It copies the global rtc_generation into curthread->td_rtcgen,
578 * reads the RTC, and calculates a sleep duration based on that time.
580 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
581 * threads that are sleeping until an absolute real-clock time.
583 * - Ts reaches the code below. It holds the sleepqueue chain lock,
587 rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
588 if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
590 td->td_rtcgen = 0;
595 mtx_unlock_spin(&sc->sc_lock);
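The Ts/Tc description above covers the sleeper side of the RTC race; a minimal sketch of what the Ts thread is assumed to do before it ever reaches sleepq_switch() (the sleep helper name is hypothetical, and the real code is believed to live in the clock_nanosleep path rather than in this file):

/* Ts: sleep until an absolute real-clock time (illustrative only). */
curthread->td_rtcgen = atomic_load_acq_int(&rtc_generation); /* capture generation */
getnanotime(&now);                       /* read the RTC */
timespecsub(&abstime, &now, &rel);       /* duration based on that reading */
error = sleep_for(wchan, &rel);          /* hypothetical helper; blocks via sleepq_switch() */
curthread->td_rtcgen = 0;                /* absolute sleep finished */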
601 sleepq_profile(td->td_wmesg);
603 MPASS(td->td_sleepqueue == NULL);
605 thread_lock_set(td, &sc->sc_lock);
610 CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
611 (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
625 if (td->td_sleeptimo != 0) {
626 if (td->td_sleeptimo <= sbinuptime())
628 td->td_sleeptimo = 0;
642 KASSERT((td->td_flags & TDF_SINTR) == 0,
645 return (td->td_intrval);
657 MPASS(!(td->td_flags & TDF_SINTR));
687 MPASS(!(td->td_flags & TDF_SINTR));
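Taken together, the fragments above are the sleep half of the interface: sleepq_add() queues the current thread, sleepq_set_timeout() arms the per-thread callout, sleepq_catch_signals() and sleepq_switch() do the actual block, and the sleepq_wait()/sleepq_timedwait() assertions check that TDF_SINTR is clear for the non-interruptible variants. A minimal sketch of how a consumer such as _sleep() or the condition-variable code is assumed to drive them; flag and argument details should be checked against sleepqueue(9):

sleepq_lock(wchan);                               /* lock the chain for this wait channel */
sleepq_add(wchan, &lock->lock_object, "example",  /* queue curthread on wchan */
    SLEEPQ_SLEEP | SLEEPQ_INTERRUPTIBLE, 0);
sleepq_set_timeout_sbt(wchan, sbt, pr, flags);    /* optional: arm td_slpcallout */
error = sleepq_timedwait_sig(wchan, pri);         /* block; 0, EWOULDBLOCK, or a signal error */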
728 return (-1);
729 type = sq->sq_type;
737 * Requires the sc chain locked on entry. If SRQ_HOLD is specified it will
738 * be locked on return. Returns without the thread lock held.
748 MPASS(sq->sq_wchan != NULL);
749 MPASS(td->td_wchan == sq->sq_wchan);
751 sc = SC_LOOKUP(sq->sq_wchan);
752 mtx_assert(&sc->sc_lock, MA_OWNED);
755 * Avoid recursing on the chain lock. If the locks don't match we
774 mtx_unlock_spin(&sc->sc_lock);
778 if (pri != 0 && td->td_priority > pri &&
779 PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
784 * sleepq_catch_signals() on another CPU or is blocked on its
804 MPASS(sq->sq_wchan != NULL);
805 MPASS(td->td_wchan == sq->sq_wchan);
806 MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
808 sc = SC_LOOKUP(sq->sq_wchan);
809 mtx_assert(&sc->sc_lock, MA_OWNED);
811 SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
814 sq->sq_blockedcnt[td->td_sqqueue]--;
815 TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);
822 if (LIST_EMPTY(&sq->sq_free)) {
823 td->td_sleepqueue = sq;
825 sq->sq_wchan = NULL;
828 sc->sc_depth--;
831 td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
832 LIST_REMOVE(td->td_sleepqueue, sq_hash);
834 if ((td->td_flags & TDF_TIMEOUT) == 0 && td->td_sleeptimo != 0 &&
835 td->td_lock == &sc->sc_lock) {
839 * type-stable, the callout will use the correct
843 * get spurious wakeups, even if the callout was reset
848 * order of callout lock -> scheduler lock. The thread
853 callout_stop(&td->td_slpcallout);
856 td->td_wmesg = NULL;
857 td->td_wchan = NULL;
858 td->td_flags &= ~(TDF_SINTR | TDF_TIMEOUT);
861 (void *)td, (long)td->td_proc->p_pid, td->td_name);
873 wchan = td->td_wchan;
875 mtx_lock_spin(&sc->sc_lock);
880 mtx_unlock_spin(&sc->sc_lock);
896 MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
897 MPASS(sq->sq_blockedcnt[i] == 0);
914 TAILQ_INIT(&sq->sq_blocked[i]);
915 sq->sq_blockedcnt[i] = 0;
917 LIST_INIT(&sq->sq_free);
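The dtor/init fragments just above are the hooks behind the per-thread sleepqueue: each thread owns one queue, lends it to the wait channel in sleepq_add(), and takes a spare from sq_free when it is removed. A brief sketch of the assumed per-thread lifecycle (the real call sites are believed to be in the thread create/destroy paths, not in this file):

/* At thread initialization (sketch): */
td->td_sleepqueue = sleepq_alloc();
/* ... the thread sleeps and wakes any number of times ... */
/* At thread teardown (sketch): */
sleepq_free(td->td_sleepqueue);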
922 * Find thread sleeping on a wait channel and resume it.
941 KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
944 head = &sq->sq_blocked[queue];
949 * avoid spinning on the thread lock.
953 while (besttd->td_lock != &sc->sc_lock) {
961 * Find the highest priority thread on the queue. If there
968 if (td->td_priority < besttd->td_priority)
985 * Resume all threads sleeping on a specified wait channel.
997 KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
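sleepq_signal() and sleepq_broadcast() above are the wakeup half of the interface. A minimal sketch of the assumed caller pattern, following the rule from the header comment that the chain lock is held across the lookup and the wakeup:

sleepq_lock(wchan);                                  /* chain lock protects the lookup */
if (wake_all)
        sleepq_broadcast(wchan, SLEEPQ_SLEEP, 0, 0); /* resume every sleeper on wchan */
else
        sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);    /* resume the best-priority sleeper */
sleepq_release(wchan);                               /* drop the chain lock */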
1005 * Resume threads on the sleep queue that match the given predicate.
1015 * re-enqueue itself before sleepq_resume_thread() returns,
1019 TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
1039 (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
1042 if (td->td_sleeptimo == 0 ||
1043 td->td_sleeptimo > td->td_slpcallout.c_time) {
1052 wchan = td->td_wchan;
1054 THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
1057 td->td_flags |= TDF_TIMEOUT;
1062 * If the thread is on the SLEEPQ but isn't sleeping
1063 * yet, it can either be on another CPU in between
1067 td->td_flags |= TDF_TIMEOUT;
1074 * wait channel if it is on that queue.
1083 * Look up the sleep queue for this wait channel, then re-check
1084 * that the thread is asleep on that channel; if it is not, then
1089 mtx_lock_spin(&sc->sc_lock);
1091 * We can not lock the thread here as it may be sleeping on a
1096 if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
1097 mtx_unlock_spin(&sc->sc_lock);
1101 /* Thread is asleep on sleep queue sq, so wake it up. */
1104 MPASS(td->td_wchan == wchan);
1112 * Requires thread lock on entry, releases on return.
1122 MPASS(td->td_flags & TDF_SINTR);
1123 MPASS((intrval == 0 && (td->td_flags & TDF_SIGWAIT) != 0) ||
1130 if (td->td_flags & TDF_TIMEOUT) {
1136 (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
1137 td->td_intrval = intrval;
1148 wchan = td->td_wchan;
1153 /* Thread is asleep on sleep queue sq, so wake it up. */
1165 if (LIST_EMPTY(&sc->sc_queues)) {
1168 mtx_lock_spin(&sc->sc_lock);
1169 LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) {
1173 mtx_unlock_spin(&sc->sc_lock);
1178 * Prints the stacks of all threads presently sleeping on wchan/queue to
1180 * printed. Typically, this will equal the number of threads sleeping on the
1209 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
1242 TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
1251 td->td_tid, td->td_name, td);
1262 sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i]));
1318 if (sp->sp_wmesg == wmesg)
1323 sp->sp_wmesg = wmesg;
1327 sp->sp_count++;
1348 sp->sp_wmesg = NULL;
1349 sp->sp_count = 0;
1365 if (req->newptr == NULL)
1387 if (req->newptr == NULL)
1417 sp->sp_wmesg, sp->sp_count);
1433 SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset,
1436 "Reset sleepqueue profiling statistics");
1464 LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
1465 if (sq->sq_wchan == wchan)
1481 db_printf("Wait channel: %p\n", sq->sq_wchan);
1482 db_printf("Queue type: %d\n", sq->sq_type);
1484 if (sq->sq_lock) {
1485 lock = sq->sq_lock;
1486 db_printf("Associated Interlock: %p - (%s) %s\n", lock,
1487 LOCK_CLASS(lock)->lc_name, lock->lo_name);
1493 if (TAILQ_EMPTY(&sq->sq_blocked[i]))
1496 TAILQ_FOREACH(td, &sq->sq_blocked[i],
1499 td->td_tid, td->td_proc->p_pid,
1500 td->td_name);
1502 db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
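The db_printf fragments above belong to the DDB "show sleepq" command: given a wait-channel address at the debugger prompt (for example, show sleepq 0xfffff800deadbeef, where the address is purely illustrative), it is expected to print the wait channel, the queue type, the associated interlock if any, and the blocked threads on each sub-queue, cross-checked against sq_blockedcnt.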