xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/drm_modeset_lock.c (revision 41ec02673d281bbb3d38e6c78504ce6e30c228c1)
1*41ec0267Sriastradh /*	$NetBSD: drm_modeset_lock.c,v 1.5 2021/12/18 23:44:57 riastradh Exp $	*/
2efa246c0Sriastradh 
3efa246c0Sriastradh /*
4efa246c0Sriastradh  * Copyright (C) 2014 Red Hat
5efa246c0Sriastradh  * Author: Rob Clark <robdclark@gmail.com>
6efa246c0Sriastradh  *
7efa246c0Sriastradh  * Permission is hereby granted, free of charge, to any person obtaining a
8efa246c0Sriastradh  * copy of this software and associated documentation files (the "Software"),
9efa246c0Sriastradh  * to deal in the Software without restriction, including without limitation
10efa246c0Sriastradh  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11efa246c0Sriastradh  * and/or sell copies of the Software, and to permit persons to whom the
12efa246c0Sriastradh  * Software is furnished to do so, subject to the following conditions:
13efa246c0Sriastradh  *
14efa246c0Sriastradh  * The above copyright notice and this permission notice shall be included in
15efa246c0Sriastradh  * all copies or substantial portions of the Software.
16efa246c0Sriastradh  *
17efa246c0Sriastradh  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18efa246c0Sriastradh  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19efa246c0Sriastradh  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20efa246c0Sriastradh  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21efa246c0Sriastradh  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22efa246c0Sriastradh  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23efa246c0Sriastradh  * OTHER DEALINGS IN THE SOFTWARE.
24efa246c0Sriastradh  */
25efa246c0Sriastradh 
26efa246c0Sriastradh #include <sys/cdefs.h>
27*41ec0267Sriastradh __KERNEL_RCSID(0, "$NetBSD: drm_modeset_lock.c,v 1.5 2021/12/18 23:44:57 riastradh Exp $");
28efa246c0Sriastradh 
29*41ec0267Sriastradh #include <drm/drm_atomic.h>
30efa246c0Sriastradh #include <drm/drm_crtc.h>
31*41ec0267Sriastradh #include <drm/drm_device.h>
32efa246c0Sriastradh #include <drm/drm_modeset_lock.h>
33efa246c0Sriastradh 
34efa246c0Sriastradh /**
35efa246c0Sriastradh  * DOC: kms locking
36efa246c0Sriastradh  *
37efa246c0Sriastradh  * As KMS moves toward more fine grained locking, and atomic ioctl where
38efa246c0Sriastradh  * userspace can indirectly control locking order, it becomes necessary
39*41ec0267Sriastradh  * to use &ww_mutex and acquire-contexts to avoid deadlocks.  But because
40efa246c0Sriastradh  * the locking is more distributed around the driver code, we want a bit
41efa246c0Sriastradh  * of extra utility/tracking out of our acquire-ctx.  This is provided
42*41ec0267Sriastradh  * by &struct drm_modeset_lock and &struct drm_modeset_acquire_ctx.
43efa246c0Sriastradh  *
44*41ec0267Sriastradh  * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.rst
45efa246c0Sriastradh  *
46*41ec0267Sriastradh  * The basic usage pattern is to::
47efa246c0Sriastradh  *
48*41ec0267Sriastradh  *     drm_modeset_acquire_init(ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
49efa246c0Sriastradh  *     retry:
50efa246c0Sriastradh  *     foreach (lock in random_ordered_set_of_locks) {
51*41ec0267Sriastradh  *         ret = drm_modeset_lock(lock, ctx)
52efa246c0Sriastradh  *         if (ret == -EDEADLK) {
53*41ec0267Sriastradh  *             ret = drm_modeset_backoff(ctx);
54*41ec0267Sriastradh  *             if (!ret)
55efa246c0Sriastradh  *                 goto retry;
56efa246c0Sriastradh  *         }
57*41ec0267Sriastradh  *         if (ret)
58*41ec0267Sriastradh  *             goto out;
59efa246c0Sriastradh  *     }
60efa246c0Sriastradh  *     ... do stuff ...
61*41ec0267Sriastradh  *     out:
62*41ec0267Sriastradh  *     drm_modeset_drop_locks(ctx);
63*41ec0267Sriastradh  *     drm_modeset_acquire_fini(ctx);
64efa246c0Sriastradh  *
65*41ec0267Sriastradh  * For convenience this control flow is implemented in
66*41ec0267Sriastradh  * DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case
67*41ec0267Sriastradh  * where all modeset locks need to be taken through drm_modeset_lock_all_ctx().
68*41ec0267Sriastradh  *
69*41ec0267Sriastradh  * If all that is needed is a single modeset lock, then the &struct
70*41ec0267Sriastradh  * drm_modeset_acquire_ctx is not needed and the locking can be simplified
71*41ec0267Sriastradh  * by passing a NULL instead of ctx in the drm_modeset_lock() call or
72*41ec0267Sriastradh  * calling  drm_modeset_lock_single_interruptible(). To unlock afterwards
73*41ec0267Sriastradh  * call drm_modeset_unlock().
74*41ec0267Sriastradh  *
75*41ec0267Sriastradh  * On top of these per-object locks using &ww_mutex there's also an overall
76*41ec0267Sriastradh  * &drm_mode_config.mutex, for protecting everything else. Mostly this means
77*41ec0267Sriastradh  * probe state of connectors, and preventing hotplug add/removal of connectors.
78*41ec0267Sriastradh  *
79*41ec0267Sriastradh  * Finally there's a bunch of dedicated locks to protect drm core internal
80*41ec0267Sriastradh  * lists and lookup data structures.
81efa246c0Sriastradh  */
82efa246c0Sriastradh 
/*
 * Single wound/wait class shared by every modeset lock (CRTC, plane,
 * connection_mutex, private objects) so the ww_mutex algorithm can
 * detect and resolve cross-object deadlocks.
 */
static DEFINE_WW_CLASS(crtc_ww_class);
84*41ec0267Sriastradh 
85efa246c0Sriastradh /**
86efa246c0Sriastradh  * drm_modeset_lock_all - take all modeset locks
87*41ec0267Sriastradh  * @dev: DRM device
88efa246c0Sriastradh  *
89efa246c0Sriastradh  * This function takes all modeset locks, suitable where a more fine-grained
90*41ec0267Sriastradh  * scheme isn't (yet) implemented. Locks must be dropped by calling the
91*41ec0267Sriastradh  * drm_modeset_unlock_all() function.
92*41ec0267Sriastradh  *
93*41ec0267Sriastradh  * This function is deprecated. It allocates a lock acquisition context and
 * stores it in &drm_device.mode_config. This facilitates conversion of
95*41ec0267Sriastradh  * existing code because it removes the need to manually deal with the
96*41ec0267Sriastradh  * acquisition context, but it is also brittle because the context is global
97*41ec0267Sriastradh  * and care must be taken not to nest calls. New code should use the
98*41ec0267Sriastradh  * drm_modeset_lock_all_ctx() function and pass in the context explicitly.
99efa246c0Sriastradh  */
void drm_modeset_lock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	/*
	 * __GFP_NOFAIL means the allocation cannot return NULL; the
	 * WARN_ON below is purely defensive.
	 */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
	if (WARN_ON(!ctx))
		return;

	/* Held until drm_modeset_unlock_all(); guards connector probe state. */
	mutex_lock(&config->mutex);

	drm_modeset_acquire_init(ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, ctx);
	if (ret < 0) {
		if (ret == -EDEADLK) {
			/* ww deadlock: drop all locks, block on the
			 * contended one, and start over. */
			drm_modeset_backoff(ctx);
			goto retry;
		}

		/*
		 * NOTE(review): with flags == 0 the locking above is
		 * uninterruptible, so this branch looks unreachable in
		 * practice — kept for safety; confirm against ww_mutex
		 * error contract.
		 */
		drm_modeset_acquire_fini(ctx);
		kfree(ctx);
		return;
	}
	ww_acquire_done(&ctx->ww_ctx);

	WARN_ON(config->acquire_ctx);

	/*
	 * We hold the locks now, so it is safe to stash the acquisition
	 * context for drm_modeset_unlock_all().
	 */
	config->acquire_ctx = ctx;

	drm_warn_on_modeset_not_all_locked(dev);
}
EXPORT_SYMBOL(drm_modeset_lock_all);
139efa246c0Sriastradh 
140efa246c0Sriastradh /**
141efa246c0Sriastradh  * drm_modeset_unlock_all - drop all modeset locks
142*41ec0267Sriastradh  * @dev: DRM device
143efa246c0Sriastradh  *
144*41ec0267Sriastradh  * This function drops all modeset locks taken by a previous call to the
145*41ec0267Sriastradh  * drm_modeset_lock_all() function.
146*41ec0267Sriastradh  *
147*41ec0267Sriastradh  * This function is deprecated. It uses the lock acquisition context stored
148*41ec0267Sriastradh  * in &drm_device.mode_config. This facilitates conversion of existing
149*41ec0267Sriastradh  * code because it removes the need to manually deal with the acquisition
150*41ec0267Sriastradh  * context, but it is also brittle because the context is global and care must
151*41ec0267Sriastradh  * be taken not to nest calls. New code should pass the acquisition context
152*41ec0267Sriastradh  * directly to the drm_modeset_drop_locks() function.
153efa246c0Sriastradh  */
drm_modeset_unlock_all(struct drm_device * dev)154efa246c0Sriastradh void drm_modeset_unlock_all(struct drm_device *dev)
155efa246c0Sriastradh {
156efa246c0Sriastradh 	struct drm_mode_config *config = &dev->mode_config;
157efa246c0Sriastradh 	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
158efa246c0Sriastradh 
159efa246c0Sriastradh 	if (WARN_ON(!ctx))
160efa246c0Sriastradh 		return;
161efa246c0Sriastradh 
162efa246c0Sriastradh 	config->acquire_ctx = NULL;
163efa246c0Sriastradh 	drm_modeset_drop_locks(ctx);
164efa246c0Sriastradh 	drm_modeset_acquire_fini(ctx);
165efa246c0Sriastradh 
166efa246c0Sriastradh 	kfree(ctx);
167efa246c0Sriastradh 
168efa246c0Sriastradh 	mutex_unlock(&dev->mode_config.mutex);
169efa246c0Sriastradh }
170efa246c0Sriastradh EXPORT_SYMBOL(drm_modeset_unlock_all);
171efa246c0Sriastradh 
172efa246c0Sriastradh /**
173efa246c0Sriastradh  * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
174efa246c0Sriastradh  * @dev: device
175efa246c0Sriastradh  *
176efa246c0Sriastradh  * Useful as a debug assert.
177efa246c0Sriastradh  */
drm_warn_on_modeset_not_all_locked(struct drm_device * dev)178efa246c0Sriastradh void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
179efa246c0Sriastradh {
180efa246c0Sriastradh 	struct drm_crtc *crtc;
181efa246c0Sriastradh 
182efa246c0Sriastradh 	/* Locking is currently fubar in the panic handler. */
183efa246c0Sriastradh 	if (oops_in_progress)
184efa246c0Sriastradh 		return;
185efa246c0Sriastradh 
186efa246c0Sriastradh 	drm_for_each_crtc(crtc, dev)
187efa246c0Sriastradh 		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
188efa246c0Sriastradh 
189efa246c0Sriastradh 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
190efa246c0Sriastradh 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
191efa246c0Sriastradh }
192efa246c0Sriastradh EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
193efa246c0Sriastradh 
194efa246c0Sriastradh /**
195efa246c0Sriastradh  * drm_modeset_acquire_init - initialize acquire context
196efa246c0Sriastradh  * @ctx: the acquire context
197*41ec0267Sriastradh  * @flags: 0 or %DRM_MODESET_ACQUIRE_INTERRUPTIBLE
198*41ec0267Sriastradh  *
199*41ec0267Sriastradh  * When passing %DRM_MODESET_ACQUIRE_INTERRUPTIBLE to @flags,
200*41ec0267Sriastradh  * all calls to drm_modeset_lock() will perform an interruptible
201*41ec0267Sriastradh  * wait.
202efa246c0Sriastradh  */
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
		uint32_t flags)
{
	/* Start from a clean slate; all bookkeeping fields become zero. */
	memset(ctx, 0, sizeof(*ctx));

	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
	INIT_LIST_HEAD(&ctx->locked);

	/* Record whether subsequent waits should be interruptible. */
	ctx->interruptible = (flags & DRM_MODESET_ACQUIRE_INTERRUPTIBLE) != 0;
}
EXPORT_SYMBOL(drm_modeset_acquire_init);
214efa246c0Sriastradh 
215efa246c0Sriastradh /**
216efa246c0Sriastradh  * drm_modeset_acquire_fini - cleanup acquire context
217efa246c0Sriastradh  * @ctx: the acquire context
218efa246c0Sriastradh  */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	/* Tear down the ww acquire context; caller must have dropped
	 * all locks (drm_modeset_drop_locks()) beforehand. */
	ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);
224efa246c0Sriastradh 
225efa246c0Sriastradh /**
226efa246c0Sriastradh  * drm_modeset_drop_locks - drop all locks
227efa246c0Sriastradh  * @ctx: the acquire context
228efa246c0Sriastradh  *
229efa246c0Sriastradh  * Drop all locks currently held against this acquire context.
230efa246c0Sriastradh  */
drm_modeset_drop_locks(struct drm_modeset_acquire_ctx * ctx)231efa246c0Sriastradh void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
232efa246c0Sriastradh {
233efa246c0Sriastradh 	WARN_ON(ctx->contended);
234efa246c0Sriastradh 	while (!list_empty(&ctx->locked)) {
235efa246c0Sriastradh 		struct drm_modeset_lock *lock;
236efa246c0Sriastradh 
237efa246c0Sriastradh 		lock = list_first_entry(&ctx->locked,
238efa246c0Sriastradh 				struct drm_modeset_lock, head);
239efa246c0Sriastradh 
240efa246c0Sriastradh 		drm_modeset_unlock(lock);
241efa246c0Sriastradh 	}
242efa246c0Sriastradh }
243efa246c0Sriastradh EXPORT_SYMBOL(drm_modeset_drop_locks);
244efa246c0Sriastradh 
/*
 * Common acquisition path for drm_modeset_lock() and drm_modeset_backoff().
 *
 * @interruptible: use the _interruptible ww_mutex variants, which may
 *	fail with -ERESTARTSYS.
 * @slow: use the _slow ww_mutex variants, i.e. block unconditionally on
 *	the previously contended lock (the backoff path).
 *
 * On success the lock is linked onto ctx->locked so that
 * drm_modeset_drop_locks() can release it later.  On -EDEADLK the lock
 * is remembered in ctx->contended for drm_modeset_backoff().
 */
static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	/* Caller must have backed off after a previous -EDEADLK. */
	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		/* The non-interruptible slow path cannot fail. */
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		/* Track the lock so drm_modeset_drop_locks() finds it. */
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		/* Remember the contended lock for drm_modeset_backoff(). */
		ctx->contended = lock;
	}

	return ret;
}
286efa246c0Sriastradh 
287*41ec0267Sriastradh /**
288*41ec0267Sriastradh  * drm_modeset_backoff - deadlock avoidance backoff
289*41ec0267Sriastradh  * @ctx: the acquire context
290*41ec0267Sriastradh  *
291*41ec0267Sriastradh  * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
292*41ec0267Sriastradh  * you must call this function to drop all currently held locks and
293*41ec0267Sriastradh  * block until the contended lock becomes available.
294*41ec0267Sriastradh  *
295*41ec0267Sriastradh  * This function returns 0 on success, or -ERESTARTSYS if this context
296*41ec0267Sriastradh  * is initialized with %DRM_MODESET_ACQUIRE_INTERRUPTIBLE and the
297*41ec0267Sriastradh  * wait has been interrupted.
298*41ec0267Sriastradh  */
drm_modeset_backoff(struct drm_modeset_acquire_ctx * ctx)299*41ec0267Sriastradh int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
300efa246c0Sriastradh {
301efa246c0Sriastradh 	struct drm_modeset_lock *contended = ctx->contended;
302efa246c0Sriastradh 
303efa246c0Sriastradh 	ctx->contended = NULL;
304efa246c0Sriastradh 
305efa246c0Sriastradh 	if (WARN_ON(!contended))
306efa246c0Sriastradh 		return 0;
307efa246c0Sriastradh 
308efa246c0Sriastradh 	drm_modeset_drop_locks(ctx);
309efa246c0Sriastradh 
310*41ec0267Sriastradh 	return modeset_lock(contended, ctx, ctx->interruptible, true);
311efa246c0Sriastradh }
312efa246c0Sriastradh EXPORT_SYMBOL(drm_modeset_backoff);
313efa246c0Sriastradh 
314efa246c0Sriastradh /**
315*41ec0267Sriastradh  * drm_modeset_lock_init - initialize lock
316*41ec0267Sriastradh  * @lock: lock to init
317efa246c0Sriastradh  */
void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
	/* All modeset locks share crtc_ww_class for deadlock detection. */
	ww_mutex_init(&lock->mutex, &crtc_ww_class);
	/* head links the lock into an acquire ctx's ->locked list. */
	INIT_LIST_HEAD(&lock->head);
}
EXPORT_SYMBOL(drm_modeset_lock_init);
324efa246c0Sriastradh 
325efa246c0Sriastradh /**
326efa246c0Sriastradh  * drm_modeset_lock - take modeset lock
327efa246c0Sriastradh  * @lock: lock to take
328efa246c0Sriastradh  * @ctx: acquire ctx
329efa246c0Sriastradh  *
330*41ec0267Sriastradh  * If @ctx is not NULL, then its ww acquire context is used and the
331efa246c0Sriastradh  * lock will be tracked by the context and can be released by calling
332efa246c0Sriastradh  * drm_modeset_drop_locks().  If -EDEADLK is returned, this means a
333efa246c0Sriastradh  * deadlock scenario has been detected and it is an error to attempt
334efa246c0Sriastradh  * to take any more locks without first calling drm_modeset_backoff().
335*41ec0267Sriastradh  *
336*41ec0267Sriastradh  * If the @ctx is not NULL and initialized with
337*41ec0267Sriastradh  * %DRM_MODESET_ACQUIRE_INTERRUPTIBLE, this function will fail with
338*41ec0267Sriastradh  * -ERESTARTSYS when interrupted.
339*41ec0267Sriastradh  *
340*41ec0267Sriastradh  * If @ctx is NULL then the function call behaves like a normal,
341*41ec0267Sriastradh  * uninterruptible non-nesting mutex_lock() call.
342efa246c0Sriastradh  */
drm_modeset_lock(struct drm_modeset_lock * lock,struct drm_modeset_acquire_ctx * ctx)343efa246c0Sriastradh int drm_modeset_lock(struct drm_modeset_lock *lock,
344efa246c0Sriastradh 		struct drm_modeset_acquire_ctx *ctx)
345efa246c0Sriastradh {
346efa246c0Sriastradh 	if (ctx)
347*41ec0267Sriastradh 		return modeset_lock(lock, ctx, ctx->interruptible, false);
348efa246c0Sriastradh 
349efa246c0Sriastradh 	ww_mutex_lock(&lock->mutex, NULL);
350efa246c0Sriastradh 	return 0;
351efa246c0Sriastradh }
352efa246c0Sriastradh EXPORT_SYMBOL(drm_modeset_lock);
353efa246c0Sriastradh 
354efa246c0Sriastradh /**
355*41ec0267Sriastradh  * drm_modeset_lock_single_interruptible - take a single modeset lock
356efa246c0Sriastradh  * @lock: lock to take
357efa246c0Sriastradh  *
358*41ec0267Sriastradh  * This function behaves as drm_modeset_lock() with a NULL context,
359*41ec0267Sriastradh  * but performs interruptible waits.
360*41ec0267Sriastradh  *
361*41ec0267Sriastradh  * This function returns 0 on success, or -ERESTARTSYS when interrupted.
362efa246c0Sriastradh  */
int drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock)
{
	/* NULL acquire ctx: untracked single lock, interruptible wait;
	 * returns 0 or -ERESTARTSYS. */
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_single_interruptible);
368efa246c0Sriastradh 
369efa246c0Sriastradh /**
370efa246c0Sriastradh  * drm_modeset_unlock - drop modeset lock
371efa246c0Sriastradh  * @lock: lock to release
372efa246c0Sriastradh  */
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
	/* list_del_init is safe even for locks taken without a ctx,
	 * whose head was never added to a ->locked list. */
	list_del_init(&lock->head);
	ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);
379efa246c0Sriastradh 
380*41ec0267Sriastradh /**
381*41ec0267Sriastradh  * drm_modeset_lock_all_ctx - take all modeset locks
382*41ec0267Sriastradh  * @dev: DRM device
383*41ec0267Sriastradh  * @ctx: lock acquisition context
384*41ec0267Sriastradh  *
385*41ec0267Sriastradh  * This function takes all modeset locks, suitable where a more fine-grained
386*41ec0267Sriastradh  * scheme isn't (yet) implemented.
387*41ec0267Sriastradh  *
388*41ec0267Sriastradh  * Unlike drm_modeset_lock_all(), it doesn't take the &drm_mode_config.mutex
389*41ec0267Sriastradh  * since that lock isn't required for modeset state changes. Callers which
390*41ec0267Sriastradh  * need to grab that lock too need to do so outside of the acquire context
391*41ec0267Sriastradh  * @ctx.
392*41ec0267Sriastradh  *
393*41ec0267Sriastradh  * Locks acquired with this function should be released by calling the
394*41ec0267Sriastradh  * drm_modeset_drop_locks() function on @ctx.
395*41ec0267Sriastradh  *
396*41ec0267Sriastradh  * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
397*41ec0267Sriastradh  *
398*41ec0267Sriastradh  * Returns: 0 on success or a negative error-code on failure.
399*41ec0267Sriastradh  */
drm_modeset_lock_all_ctx(struct drm_device * dev,struct drm_modeset_acquire_ctx * ctx)400*41ec0267Sriastradh int drm_modeset_lock_all_ctx(struct drm_device *dev,
401efa246c0Sriastradh 			     struct drm_modeset_acquire_ctx *ctx)
402efa246c0Sriastradh {
403*41ec0267Sriastradh 	struct drm_private_obj *privobj;
404efa246c0Sriastradh 	struct drm_crtc *crtc;
405efa246c0Sriastradh 	struct drm_plane *plane;
406*41ec0267Sriastradh 	int ret;
407*41ec0267Sriastradh 
408*41ec0267Sriastradh 	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
409*41ec0267Sriastradh 	if (ret)
410*41ec0267Sriastradh 		return ret;
411efa246c0Sriastradh 
412efa246c0Sriastradh 	drm_for_each_crtc(crtc, dev) {
413efa246c0Sriastradh 		ret = drm_modeset_lock(&crtc->mutex, ctx);
414efa246c0Sriastradh 		if (ret)
415efa246c0Sriastradh 			return ret;
416efa246c0Sriastradh 	}
417efa246c0Sriastradh 
418efa246c0Sriastradh 	drm_for_each_plane(plane, dev) {
419efa246c0Sriastradh 		ret = drm_modeset_lock(&plane->mutex, ctx);
420efa246c0Sriastradh 		if (ret)
421efa246c0Sriastradh 			return ret;
422efa246c0Sriastradh 	}
423efa246c0Sriastradh 
424*41ec0267Sriastradh 	drm_for_each_privobj(privobj, dev) {
425*41ec0267Sriastradh 		ret = drm_modeset_lock(&privobj->lock, ctx);
426*41ec0267Sriastradh 		if (ret)
427*41ec0267Sriastradh 			return ret;
428*41ec0267Sriastradh 	}
429*41ec0267Sriastradh 
430efa246c0Sriastradh 	return 0;
431efa246c0Sriastradh }
432*41ec0267Sriastradh EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
433