/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  Solaris Porting Layer (SPL) Condition Variables Implementation.
 */

#include <sys/condvar.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <linux/hrtimer.h>
#include <linux/compiler_compat.h>
#include <linux/mod_compat.h>

#include <linux/sched.h>

#ifdef HAVE_SCHED_SIGNAL_HEADER
#include <linux/sched/signal.h>
#endif

#define	MAX_HRTIMEOUT_SLACK_US	1000
unsigned int spl_schedule_hrtimeout_slack_us = 0;

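/*
 * Module parameter setter for spl_schedule_hrtimeout_slack_us.  Rejects
 * values larger than MAX_HRTIMEOUT_SLACK_US (1000 us) before handing the
 * string off to param_set_uint().
 */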
static int
param_set_hrtimeout_slack(const char *buf, zfs_kernel_param_t *kp)
{
	unsigned long val;
	int error;

	error = kstrtoul(buf, 0, &val);
	if (error)
		return (error);

	if (val > MAX_HRTIMEOUT_SLACK_US)
		return (-EINVAL);

	error = param_set_uint(buf, kp);
	if (error < 0)
		return (error);

	return (0);
}

module_param_call(spl_schedule_hrtimeout_slack_us, param_set_hrtimeout_slack,
	param_get_uint, &spl_schedule_hrtimeout_slack_us, 0644);
MODULE_PARM_DESC(spl_schedule_hrtimeout_slack_us,
	"schedule_hrtimeout_range() delta/slack value in us, default(0)");

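/*
 * cv_init() implementation.  Only the CV_DEFAULT type is supported on
 * Linux; the name and arg arguments must be NULL.
 */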
void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	ASSERT(cvp);
	ASSERT(name == NULL);
	ASSERT(type == CV_DEFAULT);
	ASSERT(arg == NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	init_waitqueue_head(&cvp->cv_destroy);
	atomic_set(&cvp->cv_waiters, 0);
	atomic_set(&cvp->cv_refs, 1);
	cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);

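/*
 * Returns 1 when the last waiter and reference are gone and cv_destroy()
 * may safely complete, 0 otherwise.
 */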
static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
		ASSERT(cvp->cv_mutex == NULL);
		ASSERT(!waitqueue_active(&cvp->cv_event));
		return (1);
	}

	return (0);
}

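/*
 * cv_destroy() implementation.  Marks the condition variable as destroyed
 * and waits for every waiter to wake and every reference to be dropped.
 */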
void
__cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	cvp->cv_magic = CV_DESTROY;
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and references dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);

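/*
 * Common wait path.  Registers the caller as an exclusive waiter, records
 * the associated mutex, drops the mutex while sleeping (optionally as an
 * I/O wait), and reacquires it before returning.
 */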
static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = READ_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex should be dropped after prepare_to_wait(); this ensures
	 * we're linked in to the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();

	/* No more waiters; a different mutex could now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after we release the cvp; otherwise we
	 * could deadlock with a thread holding the mutex and calling
	 * cv_destroy().
	 */
	mutex_enter(mp);
}

void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);

void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);

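/*
 * Signal-interruptible variants.  These return 0 if a signal is pending
 * when the wait completes and 1 otherwise.
 */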
int
__cv_wait_io_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 1);

	return (signal_pending(current) ? 0 : 1);
}
EXPORT_SYMBOL(__cv_wait_io_sig);

int
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);

	return (signal_pending(current) ? 0 : 1);
}
EXPORT_SYMBOL(__cv_wait_sig);

#if defined(HAVE_IO_SCHEDULE_TIMEOUT)
#define	spl_io_schedule_timeout(t)	io_schedule_timeout(t)
#else

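/*
 * Fallback for kernels without io_schedule_timeout(): arm a timer to wake
 * the task, account the sleep as I/O wait with io_schedule(), and return
 * the number of jiffies remaining.
 */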
struct spl_task_timer {
	struct timer_list timer;
	struct task_struct *task;
};

static void
__cv_wakeup(spl_timer_list_t t)
{
	struct timer_list *tmr = (struct timer_list *)t;
	struct spl_task_timer *task_timer = from_timer(task_timer, tmr, timer);

	wake_up_process(task_timer->task);
}

static long
spl_io_schedule_timeout(long time_left)
{
	long expire_time = jiffies + time_left;
	struct spl_task_timer task_timer;
	struct timer_list *timer = &task_timer.timer;

	task_timer.task = current;

	timer_setup(timer, __cv_wakeup, 0);

	timer->expires = expire_time;
	add_timer(timer);

	io_schedule();

	del_timer_sync(timer);

	time_left = expire_time - jiffies;

	return (time_left < 0 ? 0 : time_left);
}
#endif

/*
 * The 'expire_time' argument is an absolute wall clock time in jiffies.
 * Returns 1 if the thread was woken before the timeout expired, or -1 if
 * the timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
    int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	clock_t time_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	/* XXX - Does not handle jiffy wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0)
		return (-1);

	atomic_inc(&cvp->cv_refs);
	m = READ_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex should be dropped after prepare_to_wait(); this ensures
	 * we're linked in to the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		time_left = spl_io_schedule_timeout(time_left);
	else
		time_left = schedule_timeout(time_left);

	/* No more waiters; a different mutex could now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after we release the cvp; otherwise we
	 * could deadlock with a thread holding the mutex and calling
	 * cv_destroy().
	 */
	mutex_enter(mp);
	return (time_left > 0 ? 1 : -1);
}

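/*
 * cv_timedwait() family.  The expiration time is absolute, in jiffies;
 * the return value is -1 on timeout, 1 if woken earlier, and 0 for the
 * _sig variant when a signal is pending.
 */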
int
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_UNINTERRUPTIBLE, 0));
}
EXPORT_SYMBOL(__cv_timedwait);

int
__cv_timedwait_io(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_UNINTERRUPTIBLE, 1));
}
EXPORT_SYMBOL(__cv_timedwait_io);

int
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	int rc;

	rc = __cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE, 0);
	return (signal_pending(current) ? 0 : rc);
}
EXPORT_SYMBOL(__cv_timedwait_sig);

/*
 * The 'expire_time' argument is an absolute clock time in nanoseconds.
 * Returns 1 if the thread was woken before the timeout expired, or -1 if
 * the timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
    hrtime_t res, int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	hrtime_t time_left;
	ktime_t ktime_left;
	u64 slack = 0;
	int rc;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	time_left = expire_time - gethrtime();
	if (time_left <= 0)
		return (-1);

	atomic_inc(&cvp->cv_refs);
	m = READ_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex should be dropped after prepare_to_wait(); this ensures
	 * we're linked in to the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);

	ktime_left = ktime_set(0, time_left);
	slack = MIN(MAX(res, spl_schedule_hrtimeout_slack_us * NSEC_PER_USEC),
	    MAX_HRTIMEOUT_SLACK_US * NSEC_PER_USEC);
	rc = schedule_hrtimeout_range(&ktime_left, slack, HRTIMER_MODE_REL);

	/* No more waiters; a different mutex could now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	mutex_enter(mp);
	return (rc == -EINTR ? 1 : -1);
}

/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
static int
cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag, int state)
{
	if (!(flag & CALLOUT_FLAG_ABSOLUTE))
		tim += gethrtime();

	return (__cv_timedwait_hires(cvp, mp, tim, res, state));
}

int
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
    int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_hires);

int
cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	int rc;

	rc = cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_INTERRUPTIBLE);
	return (signal_pending(current) ? 0 : rc);
}
EXPORT_SYMBOL(cv_timedwait_sig_hires);

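/*
 * cv_signal() implementation: wake at most one waiter.
 */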
void
__cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be set runnable with each call to wake_up().
	 * Additionally, wake_up() holds a spin_lock associated with
	 * the wait queue to ensure we don't race waking up processes.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);

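/*
 * cv_broadcast() implementation: wake every waiter.
 */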
void
__cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * wake_up_all() will wake up all waiters, even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);
467