1 /*	$NetBSD: kern_sleepq.c,v 1.69 2020/10/23 00:25:45 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006, 2007, 2008, 2009, 2019, 2020 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Sleep queue implementation, used by turnstiles and general sleep/wakeup
34  * interfaces.
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.69 2020/10/23 00:25:45 thorpej Exp $");
39 
40 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/cpu.h>
43 #include <sys/intr.h>
44 #include <sys/pool.h>
45 #include <sys/proc.h>
46 #include <sys/resourcevar.h>
47 #include <sys/sched.h>
48 #include <sys/systm.h>
49 #include <sys/sleepq.h>
50 #include <sys/ktrace.h>
51 
52 /*
53  * for sleepq_abort:
54  * During autoconfiguration or after a panic, a sleep will simply lower the
55  * priority briefly to allow interrupts, then return.  The priority to be
56  * used (IPL_SAFEPRI) is machine-dependent, thus this value is initialized and
57  * maintained in the machine-dependent layers.  This priority will typically
58  * be 0, or the lowest priority that is safe for use on the interrupt stack;
59  * it can be made higher to block network software interrupts after panics.
60  */
61 #ifndef	IPL_SAFEPRI
62 #define	IPL_SAFEPRI	0
63 #endif
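
/*
 * For example (hypothetical): a port that wants post-panic sleeps to keep
 * network software interrupts blocked, as described above, could provide
 * its own value in its machine-dependent headers before this default is
 * used:
 *
 *	#define	IPL_SAFEPRI	IPL_SOFTNET
 */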
64 
65 static int	sleepq_sigtoerror(lwp_t *, int);
66 
67 /* General purpose sleep table, used by mtsleep() and condition variables. */
68 sleeptab_t	sleeptab __cacheline_aligned;
69 sleepqlock_t	sleepq_locks[SLEEPTAB_HASH_SIZE] __cacheline_aligned;
70 
71 /*
72  * sleeptab_init:
73  *
74  *	Initialize a sleep table.
75  */
76 void
77 sleeptab_init(sleeptab_t *st)
78 {
79 	static bool again;
80 	int i;
81 
82 	for (i = 0; i < SLEEPTAB_HASH_SIZE; i++) {
83 		if (!again) {
84 			mutex_init(&sleepq_locks[i].lock, MUTEX_DEFAULT,
85 			    IPL_SCHED);
86 		}
87 		sleepq_init(&st->st_queue[i]);
88 	}
89 	again = true;
90 }
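
/*
 * Illustrative sketch (not part of this file): how a wait channel maps to
 * a queue/lock pair.  Assumes the SLEEPTAB_HASH() macro from <sys/sleepq.h>;
 * the canonical form of this lookup is sleeptab_lookup() there.
 *
 *	u_int hash = SLEEPTAB_HASH(wchan);
 *	sleepq_t *sq = &sleeptab.st_queue[hash];
 *	kmutex_t *mp = &sleepq_locks[hash].lock;
 *	mutex_spin_enter(mp);		// sq may now be inspected safely
 */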
91 
92 /*
93  * sleepq_init:
94  *
95  *	Prepare a sleep queue for use.
96  */
97 void
98 sleepq_init(sleepq_t *sq)
99 {
100 
101 	LIST_INIT(sq);
102 }
103 
104 /*
105  * sleepq_remove:
106  *
107  *	Remove an LWP from a sleep queue and wake it up.
108  */
109 void
110 sleepq_remove(sleepq_t *sq, lwp_t *l)
111 {
112 	struct schedstate_percpu *spc;
113 	struct cpu_info *ci;
114 
115 	KASSERT(lwp_locked(l, NULL));
116 
117 	if ((l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_NULL) == 0) {
118 		KASSERT(sq != NULL);
119 		LIST_REMOVE(l, l_sleepchain);
120 	} else {
121 		KASSERT(sq == NULL);
122 	}
123 
124 	l->l_syncobj = &sched_syncobj;
125 	l->l_wchan = NULL;
126 	l->l_sleepq = NULL;
127 	l->l_flag &= ~LW_SINTR;
128 
129 	ci = l->l_cpu;
130 	spc = &ci->ci_schedstate;
131 
132 	/*
133 	 * If not sleeping, the LWP must have been suspended.  Leave it to
134 	 * whoever is holding it stopped to set it running again.
135 	 */
136 	if (l->l_stat != LSSLEEP) {
137 		KASSERT(l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED);
138 		lwp_setlock(l, spc->spc_lwplock);
139 		return;
140 	}
141 
142 	/*
143 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
144 	 * about to call mi_switch(), in which case it will yield.
145 	 */
146 	if ((l->l_pflag & LP_RUNNING) != 0) {
147 		l->l_stat = LSONPROC;
148 		l->l_slptime = 0;
149 		lwp_setlock(l, spc->spc_lwplock);
150 		return;
151 	}
152 
153 	/* Update the sleep time delta and call the scheduler's wake-up hook. */
154 	l->l_slpticksum += (getticks() - l->l_slpticks);
155 	sched_wakeup(l);
156 
157 	/* Pick a CPU on which to make the LWP runnable. */
158 	l->l_cpu = sched_takecpu(l);
159 	ci = l->l_cpu;
160 	spc = &ci->ci_schedstate;
161 
162 	/*
163 	 * Set it running.
164 	 */
165 	spc_lock(ci);
166 	lwp_setlock(l, spc->spc_mutex);
167 	sched_setrunnable(l);
168 	l->l_stat = LSRUN;
169 	l->l_slptime = 0;
170 	sched_enqueue(l);
171 	sched_resched_lwp(l, true);
172 	/* LWP & SPC now unlocked, but we still hold sleep queue lock. */
173 }
174 
175 /*
176  * sleepq_insert:
177  *
178  *	Insert an LWP into the sleep queue, optionally sorting by priority.
179  */
180 static void
181 sleepq_insert(sleepq_t *sq, lwp_t *l, syncobj_t *sobj)
182 {
183 
184 	if ((sobj->sobj_flag & SOBJ_SLEEPQ_NULL) != 0) {
185 		KASSERT(sq == NULL);
186 		return;
187 	}
188 	KASSERT(sq != NULL);
189 
190 	if ((sobj->sobj_flag & SOBJ_SLEEPQ_SORTED) != 0) {
191 		lwp_t *l2, *l_last = NULL;
192 		const pri_t pri = lwp_eprio(l);
193 
194 		LIST_FOREACH(l2, sq, l_sleepchain) {
195 			l_last = l2;
196 			if (lwp_eprio(l2) < pri) {
197 				LIST_INSERT_BEFORE(l2, l, l_sleepchain);
198 				return;
199 			}
200 		}
201 		/*
202 		 * Ensure FIFO ordering if no waiters are of lower priority.
203 		 */
204 		if (l_last != NULL) {
205 			LIST_INSERT_AFTER(l_last, l, l_sleepchain);
206 			return;
207 		}
208 	}
209 
210 	LIST_INSERT_HEAD(sq, l, l_sleepchain);
211 }
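
/*
 * Worked example (hypothetical priorities): inserting an LWP of effective
 * priority 50 into a sorted queue holding priorities 90, 70 and 30 places
 * it before the 30 (the first waiter of strictly lower priority), giving
 * 90, 70, 50, 30.  A second LWP of priority 50 would land after the first,
 * so equal-priority waiters wake in FIFO order.
 */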
212 
213 /*
214  * sleepq_enqueue:
215  *
216  *	Enter an LWP into the sleep queue and prepare for sleep.  The sleep
217  *	queue must already be locked, and any interlock (such as the kernel
218  *	lock) must have been released (see sleeptab_lookup(), sleepq_enter()).
219  */
220 void
221 sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj,
222     bool catch_p)
223 {
224 	lwp_t *l = curlwp;
225 
226 	KASSERT(lwp_locked(l, NULL));
227 	KASSERT(l->l_stat == LSONPROC);
228 	KASSERT(l->l_wchan == NULL && l->l_sleepq == NULL);
229 	KASSERT((l->l_flag & LW_SINTR) == 0);
230 
231 	l->l_syncobj = sobj;
232 	l->l_wchan = wchan;
233 	l->l_sleepq = sq;
234 	l->l_wmesg = wmesg;
235 	l->l_slptime = 0;
236 	l->l_stat = LSSLEEP;
237 	if (catch_p)
238 		l->l_flag |= LW_SINTR;
239 
240 	sleepq_insert(sq, l, sobj);
241 
242 	/* Record the time at which the LWP went to sleep. */
243 	l->l_slpticks = getticks();
244 	sched_slept(l);
245 }
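
/*
 * Usage sketch (hypothetical, modelled on the mtsleep() path in
 * kern_synch.c; "wchan", "interlock" and "timo" are placeholders):
 *
 *	kmutex_t *mp;
 *	sleepq_t *sq = sleeptab_lookup(&sleeptab, wchan, &mp);
 *	sleepq_enter(sq, curlwp, mp);	// lock the LWP, lend it the sq lock
 *	sleepq_enqueue(sq, wchan, "example", &sleep_syncobj, true);
 *	mutex_exit(interlock);		// drop the interlock before switching
 *	error = sleepq_block(timo, true);
 */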
246 
247 /*
248  * sleepq_transfer:
249  *
250  *	Move an LWP from one sleep queue to another.  Both sleep queues
251  *	must already be locked.
252  *
253  *	The LWP will be updated with the new sleepq, wchan, wmesg,
254  *	sobj, and mutex.  The interruptible flag will also be updated.
255  */
256 void
257 sleepq_transfer(lwp_t *l, sleepq_t *from_sq, sleepq_t *sq, wchan_t wchan,
258     const char *wmesg, syncobj_t *sobj, kmutex_t *mp, bool catch_p)
259 {
260 
261 	KASSERT(l->l_sleepq == from_sq);
262 
263 	LIST_REMOVE(l, l_sleepchain);
264 	l->l_syncobj = sobj;
265 	l->l_wchan = wchan;
266 	l->l_sleepq = sq;
267 	l->l_wmesg = wmesg;
268 
269 	if (catch_p)
270 		l->l_flag |= LW_SINTR | LW_CATCHINTR;
271 	else
272 		l->l_flag &= ~(LW_SINTR | LW_CATCHINTR);
273 
274 	/*
275 	 * This allows the transfer from one sleepq to another where
276 	 * it is known that they're both protected by the same lock.
277 	 */
278 	if (mp != NULL)
279 		lwp_setlock(l, mp);
280 
281 	sleepq_insert(sq, l, sobj);
282 }
283 
284 /*
285  * sleepq_uncatch:
286  *
287  *	Mark the LWP as no longer sleeping interruptibly.
288  */
289 void
290 sleepq_uncatch(lwp_t *l)
291 {
292 	l->l_flag &= ~(LW_SINTR | LW_CATCHINTR);
293 }
294 
295 /*
296  * sleepq_block:
297  *
298  *	After any intermediate step such as releasing an interlock, switch.
299  *	sleepq_block() may return early under exceptional conditions, for
300  *	example if the LWP's containing process is exiting.
301  *
302  *	timo is a timeout in ticks.  timo = 0 specifies an infinite timeout.
303  */
304 int
305 sleepq_block(int timo, bool catch_p)
306 {
307 	int error = 0, sig;
308 	struct proc *p;
309 	lwp_t *l = curlwp;
310 	bool early = false;
311 	int biglocks = l->l_biglocks;
312 
313 	ktrcsw(1, 0);
314 
315 	/*
316 	 * If sleeping interruptibly, check for pending signals, exits or
317 	 * core dump events.
318 	 *
319 	 * Note the usage of LW_CATCHINTR.  This expresses our intent
320 	 * to catch or not catch sleep interruptions, which might change
321 	 * while we are sleeping.  It is independent from LW_SINTR because
322 	 * we don't want to leave LW_SINTR set when the LWP is not asleep.
323 	 */
324 	if (catch_p) {
325 		if ((l->l_flag & (LW_CANCELLED|LW_WEXIT|LW_WCORE)) != 0) {
326 			l->l_flag &= ~LW_CANCELLED;
327 			error = EINTR;
328 			early = true;
329 		} else if ((l->l_flag & LW_PENDSIG) != 0 && sigispending(l, 0))
330 			early = true;
331 		l->l_flag |= LW_CATCHINTR;
332 	} else
333 		l->l_flag &= ~LW_CATCHINTR;
334 
335 	if (early) {
336 		/* lwp_unsleep() will release the lock */
337 		lwp_unsleep(l, true);
338 	} else {
339 		/*
340 		 * The LWP may have already been awoken if the caller
341 		 * dropped the sleep queue lock between sleepq_enqueue() and
342 		 * sleepq_block().  If that happens, l_stat will be LSONPROC
343 		 * and mi_switch() will treat this as a preemption.  No need
344 		 * to do anything special here.
345 		 */
346 		if (timo) {
347 			l->l_flag &= ~LW_STIMO;
348 			callout_schedule(&l->l_timeout_ch, timo);
349 		}
350 		spc_lock(l->l_cpu);
351 		mi_switch(l);
352 
353 		/* The LWP and sleep queue are now unlocked. */
354 		if (timo) {
355 			/*
356 			 * Even if the callout appears to have fired, we
357 			 * need to stop it in order to synchronise with
358 			 * other CPUs.  It's important that we do this in
359 			 * this LWP's context, and not during wakeup, in
360 			 * order to keep the callout & its cache lines
361 			 * co-located on the CPU with the LWP.
362 			 */
363 			(void)callout_halt(&l->l_timeout_ch, NULL);
364 			error = (l->l_flag & LW_STIMO) ? EWOULDBLOCK : 0;
365 		}
366 	}
367 
368 	/*
369 	 * LW_CATCHINTR is only modified in this function OR when we
370 	 * are asleep (with the sleepq locked).  We can therefore safely
371 	 * test it unlocked here as it is guaranteed to be stable by
372 	 * virtue of us running.
373 	 *
374 	 * We do not bother clearing it if set; that would require us
375 	 * to take the LWP lock, and it doesn't seem worth the hassle
376 	 * considering it is only meaningful here inside this function,
377 	 * and is set to reflect intent upon entry.
378 	 */
379 	if ((l->l_flag & LW_CATCHINTR) != 0 && error == 0) {
380 		p = l->l_proc;
381 		if ((l->l_flag & (LW_CANCELLED | LW_WEXIT | LW_WCORE)) != 0)
382 			error = EINTR;
383 		else if ((l->l_flag & LW_PENDSIG) != 0) {
384 			/*
385 			 * Acquiring p_lock may cause us to recurse
386 			 * through the sleep path and back into this
387 			 * routine, but is safe because LWPs sleeping
388 			 * on locks are non-interruptible and we will
389 			 * not recurse again.
390 			 */
391 			mutex_enter(p->p_lock);
392 			if (((sig = sigispending(l, 0)) != 0 &&
393 			    (sigprop[sig] & SA_STOP) == 0) ||
394 			    (sig = issignal(l)) != 0)
395 				error = sleepq_sigtoerror(l, sig);
396 			mutex_exit(p->p_lock);
397 		}
398 	}
399 
400 	ktrcsw(0, 0);
401 	if (__predict_false(biglocks != 0)) {
402 		KERNEL_LOCK(biglocks, NULL);
403 	}
404 	return error;
405 }
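
/*
 * A caller typically maps the return value like this (sketch; these are
 * the codes produced above and by sleepq_sigtoerror() below):
 *
 *	error = sleepq_block(timo, true);
 *	switch (error) {
 *	case 0:			// normal wakeup
 *		break;
 *	case EWOULDBLOCK:	// the timo tick limit expired (LW_STIMO)
 *	case EINTR:		// interrupted; do not restart the syscall
 *	case ERESTART:		// interrupted; restart the syscall
 *		break;
 *	}
 */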
406 
407 /*
408  * sleepq_wake:
409  *
410  *	Wake zero or more LWPs blocked on a single wait channel.
411  */
412 void
413 sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
414 {
415 	lwp_t *l, *next;
416 
417 	KASSERT(mutex_owned(mp));
418 
419 	for (l = LIST_FIRST(sq); l != NULL; l = next) {
420 		KASSERT(l->l_sleepq == sq);
421 		KASSERT(l->l_mutex == mp);
422 		next = LIST_NEXT(l, l_sleepchain);
423 		if (l->l_wchan != wchan)
424 			continue;
425 		sleepq_remove(sq, l);
426 		if (--expected == 0)
427 			break;
428 	}
429 
430 	mutex_spin_exit(mp);
431 }
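
/*
 * For example (sketch; compare wakeup() and wakeup_one() in kern_synch.c):
 * a wake-one primitive passes expected == 1, while a broadcast passes an
 * effectively unbounded count so every LWP sleeping on "wchan" is visited.
 * The caller holds the lock on entry; sleepq_wake() releases it.
 *
 *	sq = sleeptab_lookup(&sleeptab, wchan, &mp);	// enters *mp
 *	sleepq_wake(sq, wchan, 1, mp);			// wake at most one LWP
 *
 *	sq = sleeptab_lookup(&sleeptab, wchan, &mp);
 *	sleepq_wake(sq, wchan, (u_int)-1, mp);		// wake all waiters
 */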
432 
433 /*
434  * sleepq_unsleep:
435  *
436  *	Remove an LWP from its sleep queue and set it runnable again.
437  *	sleepq_unsleep() is called with the LWP's mutex held, and will
438  *	release it if "unlock" is true.
439  */
440 void
441 sleepq_unsleep(lwp_t *l, bool unlock)
442 {
443 	sleepq_t *sq = l->l_sleepq;
444 	kmutex_t *mp = l->l_mutex;
445 
446 	KASSERT(lwp_locked(l, mp));
447 	KASSERT(l->l_wchan != NULL);
448 
449 	sleepq_remove(sq, l);
450 	if (unlock) {
451 		mutex_spin_exit(mp);
452 	}
453 }
454 
455 /*
456  * sleepq_timeout:
457  *
458  *	Entered via the callout(9) subsystem to time out an LWP that is on a
459  *	sleep queue.
460  */
461 void
462 sleepq_timeout(void *arg)
463 {
464 	lwp_t *l = arg;
465 
466 	/*
467 	 * Lock the LWP.  Assuming it's still on the sleep queue, its
468 	 * current mutex will also be the sleep queue mutex.
469 	 */
470 	lwp_lock(l);
471 
472 	if (l->l_wchan == NULL) {
473 		/* Somebody beat us to it. */
474 		lwp_unlock(l);
475 		return;
476 	}
477 
478 	l->l_flag |= LW_STIMO;
479 	lwp_unsleep(l, true);
480 }
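
/*
 * The matching arm/disarm of this callout lives in sleepq_block() above:
 *
 *	callout_schedule(&l->l_timeout_ch, timo);	// before mi_switch()
 *	...
 *	(void)callout_halt(&l->l_timeout_ch, NULL);	// after waking
 */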
481 
482 /*
483  * sleepq_sigtoerror:
484  *
485  *	Given a signal number, interpret and return an error code.
486  */
487 static int
488 sleepq_sigtoerror(lwp_t *l, int sig)
489 {
490 	struct proc *p = l->l_proc;
491 	int error;
492 
493 	KASSERT(mutex_owned(p->p_lock));
494 
495 	/*
496 	 * If this sleep was canceled, don't let the syscall restart.
497 	 */
498 	if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
499 		error = EINTR;
500 	else
501 		error = ERESTART;
502 
503 	return error;
504 }
505 
506 /*
507  * sleepq_abort:
508  *
509  *	After a panic or during autoconfiguration, lower the interrupt
510  *	priority level to give pending interrupts a chance to run, and
511  *	then return.  Called if sleepq_dontsleep() returns non-zero, and
512  *	always returns zero.
513  */
514 int
515 sleepq_abort(kmutex_t *mtx, int unlock)
516 {
517 	int s;
518 
519 	s = splhigh();
520 	splx(IPL_SAFEPRI);
521 	splx(s);
522 	if (mtx != NULL && unlock != 0)
523 		mutex_exit(mtx);
524 
525 	return 0;
526 }
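
/*
 * Sketch of the guard used by the sleep entry points (compare the callers
 * in kern_synch.c; sleepq_dontsleep() is defined in <sys/sleepq.h>):
 *
 *	if (sleepq_dontsleep(l))
 *		return sleepq_abort(mtx, unlock);
 */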
527 
528 /*
529  * sleepq_reinsert:
530  *
531  *	Adjust the position of the LWP in the sleep queue after a possible
532  *	change of its effective priority.
533  */
534 static void
535 sleepq_reinsert(sleepq_t *sq, lwp_t *l)
536 {
537 
538 	KASSERT(l->l_sleepq == sq);
539 	if ((l->l_syncobj->sobj_flag & SOBJ_SLEEPQ_SORTED) == 0) {
540 		return;
541 	}
542 
543 	/*
544 	 * Don't let the sleep queue become empty, even briefly.
545 	 * cv_signal() and cv_broadcast() inspect it without the
546 	 * sleep queue lock held and need to see a non-empty queue
547 	 * head if there are waiters.
548 	 */
549 	if (LIST_FIRST(sq) == l && LIST_NEXT(l, l_sleepchain) == NULL) {
550 		return;
551 	}
552 	LIST_REMOVE(l, l_sleepchain);
553 	sleepq_insert(sq, l, l->l_syncobj);
554 }
555 
556 /*
557  * sleepq_changepri:
558  *
559  *	Adjust the priority of an LWP residing on a sleepq.
560  */
561 void
562 sleepq_changepri(lwp_t *l, pri_t pri)
563 {
564 	sleepq_t *sq = l->l_sleepq;
565 
566 	KASSERT(lwp_locked(l, NULL));
567 
568 	l->l_priority = pri;
569 	sleepq_reinsert(sq, l);
570 }
571 
572 /*
573  * sleepq_lendpri:
574  *
575  *	Adjust the lent (inherited) priority of an LWP residing on a sleepq.
576  */
577 void
578 sleepq_lendpri(lwp_t *l, pri_t pri)
579 {
580 	sleepq_t *sq = l->l_sleepq;
581 
582 	KASSERT(lwp_locked(l, NULL));
583 
584 	l->l_inheritedprio = pri;
585 	l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
586 	sleepq_reinsert(sq, l);
587 }
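
/*
 * Example (sketch): priority inheritance arrives here through the LWP's
 * syncobj hooks.  A turnstile boosting a sleeping lock owner does, in
 * effect:
 *
 *	lwp_lock(owner);
 *	lwp_lendpri(owner, pri);	// dispatches to sleepq_lendpri() via
 *	lwp_unlock(owner);		// the syncobj's lendpri hook
 *
 * after which sleepq_reinsert() has re-sorted the queue so the boosted
 * LWP is found first on wakeup.
 */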
588