xref: /openbsd-src/sys/dev/pci/drm/dma-resv.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
4  *
5  * Based on bo.c which bears the following copyright notice,
6  * but is dual licensed:
7  *
8  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
9  * All Rights Reserved.
10  *
11  * Permission is hereby granted, free of charge, to any person obtaining a
12  * copy of this software and associated documentation files (the
13  * "Software"), to deal in the Software without restriction, including
14  * without limitation the rights to use, copy, modify, merge, publish,
15  * distribute, sub license, and/or sell copies of the Software, and to
16  * permit persons to whom the Software is furnished to do so, subject to
17  * the following conditions:
18  *
19  * The above copyright notice and this permission notice (including the
20  * next paragraph) shall be included in all copies or substantial portions
21  * of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
26  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
27  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
28  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
29  * USE OR OTHER DEALINGS IN THE SOFTWARE.
30  *
31  **************************************************************************/
32 /*
33  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
34  */
35 
36 #include <linux/dma-resv.h>
37 #include <linux/export.h>
38 #include <linux/mm.h>
39 #include <linux/sched/mm.h>
40 #include <linux/mmu_notifier.h>
41 
42 /**
43  * DOC: Reservation Object Overview
44  *
45  * The reservation object provides a mechanism to manage shared and
46  * exclusive fences associated with a buffer.  A reservation object
47  * can have one exclusive fence attached (normally associated with
48  * write operations) or N shared fences (read operations).  The RCU
49  * mechanism is used to protect read access to fences from locked
50  * write-side updates.
51  */
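
/*
 * Minimal write-side sketch, assuming a hypothetical buffer object "bo"
 * that embeds a struct dma_resv named "resv" and a fence produced by the
 * caller's GPU job (the names are illustrative, not from this file):
 *
 *	if (dma_resv_lock(&bo->resv, NULL) == 0) {
 *		if (dma_resv_reserve_shared(&bo->resv, 1) == 0)
 *			dma_resv_add_shared_fence(&bo->resv, fence);
 *		dma_resv_unlock(&bo->resv);
 *	}
 */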
52 
53 DEFINE_WD_CLASS(reservation_ww_class);
54 EXPORT_SYMBOL(reservation_ww_class);
55 
56 /**
57  * dma_resv_list_alloc - allocate fence list
58  * @shared_max: number of fences we need space for
59  *
60  * Allocate a new dma_resv_list and make sure to correctly initialize
61  * shared_max.
62  */
63 static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
64 {
65 	struct dma_resv_list *list;
66 
67 	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
68 	if (!list)
69 		return NULL;
70 
71 #ifdef __linux__
72 	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
73 		sizeof(*list->shared);
74 #else
75 	list->shared_max = (offsetof(typeof(*list), shared[shared_max]) -
76 	    offsetof(typeof(*list), shared)) / sizeof(*list->shared);
77 #endif
78 
79 	return list;
80 }
81 
82 /**
83  * dma_resv_list_free - free fence list
84  * @list: list to free
85  *
86  * Free a dma_resv_list and make sure to drop all references.
87  */
88 static void dma_resv_list_free(struct dma_resv_list *list)
89 {
90 	unsigned int i;
91 
92 	if (!list)
93 		return;
94 
95 	for (i = 0; i < list->shared_count; ++i)
96 		dma_fence_put(rcu_dereference_protected(list->shared[i], true));
97 
98 	kfree_rcu(list, rcu);
99 }
100 
101 /**
102  * dma_resv_init - initialize a reservation object
103  * @obj: the reservation object
104  */
105 void dma_resv_init(struct dma_resv *obj)
106 {
107 	ww_mutex_init(&obj->lock, &reservation_ww_class);
108 	seqcount_init(&obj->seq);
109 
110 	RCU_INIT_POINTER(obj->fence, NULL);
111 	RCU_INIT_POINTER(obj->fence_excl, NULL);
112 }
113 EXPORT_SYMBOL(dma_resv_init);
114 
115 /**
116  * dma_resv_fini - destroys a reservation object
117  * @obj: the reservation object
118  */
119 void dma_resv_fini(struct dma_resv *obj)
120 {
121 	struct dma_resv_list *fobj;
122 	struct dma_fence *excl;
123 
124 	/*
125 	 * This object should be dead and all references to it must have
126 	 * been released, so there is no need for RCU protection.
127 	 */
128 	excl = rcu_dereference_protected(obj->fence_excl, 1);
129 	if (excl)
130 		dma_fence_put(excl);
131 
132 	fobj = rcu_dereference_protected(obj->fence, 1);
133 	dma_resv_list_free(fobj);
134 	ww_mutex_destroy(&obj->lock);
135 }
136 EXPORT_SYMBOL(dma_resv_fini);
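
/*
 * Lifecycle sketch, assuming a hypothetical driver object that embeds its
 * reservation object: dma_resv_init() runs once when the object is
 * created, dma_resv_fini() from the final release path when nothing can
 * reach the fences any more:
 *
 *	struct my_bo {
 *		struct dma_resv resv;
 *	};
 *
 *	dma_resv_init(&bo->resv);	// at creation
 *	...
 *	dma_resv_fini(&bo->resv);	// final release, object is idle
 */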
137 
138 /**
139  * dma_resv_reserve_shared - Reserve space to add shared fences to
140  * a dma_resv.
141  * @obj: reservation object
142  * @num_fences: number of fences we want to add
143  *
144  * Should be called before dma_resv_add_shared_fence().  Must
145  * be called with obj->lock held.
146  *
147  * RETURNS
148  * Zero for success, or -errno
149  */
150 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
151 {
152 	struct dma_resv_list *old, *new;
153 	unsigned int i, j, k, max;
154 
155 	dma_resv_assert_held(obj);
156 
157 	old = dma_resv_shared_list(obj);
158 	if (old && old->shared_max) {
159 		if ((old->shared_count + num_fences) <= old->shared_max)
160 			return 0;
161 		max = max(old->shared_count + num_fences, old->shared_max * 2);
162 	} else {
163 		max = max(4ul, roundup_pow_of_two(num_fences));
164 	}
165 
166 	new = dma_resv_list_alloc(max);
167 	if (!new)
168 		return -ENOMEM;
169 
170 	/*
171 	 * No need to bump the fence refcounts: rcu_read access requires
172 	 * the use of kref_get_unless_zero() anyway, and the references
173 	 * held by the old struct are simply carried over to the
174 	 * new one.
175 	 */
176 	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
177 		struct dma_fence *fence;
178 
179 		fence = rcu_dereference_protected(old->shared[i],
180 						  dma_resv_held(obj));
181 		if (dma_fence_is_signaled(fence))
182 			RCU_INIT_POINTER(new->shared[--k], fence);
183 		else
184 			RCU_INIT_POINTER(new->shared[j++], fence);
185 	}
186 	new->shared_count = j;
187 
188 	/*
189 	 * We are not changing the effective set of fences here so can
190 	 * merely update the pointer to the new array; both existing
191 	 * readers and new readers will see exactly the same set of
192 	 * active (unsignaled) shared fences. Individual fences and the
193 	 * old array are protected by RCU and so will not vanish under
194 	 * the gaze of the rcu_read_lock() readers.
195 	 */
196 	rcu_assign_pointer(obj->fence, new);
197 
198 	if (!old)
199 		return 0;
200 
201 	/* Drop the references to the signaled fences */
202 	for (i = k; i < max; ++i) {
203 		struct dma_fence *fence;
204 
205 		fence = rcu_dereference_protected(new->shared[i],
206 						  dma_resv_held(obj));
207 		dma_fence_put(fence);
208 	}
209 	kfree_rcu(old, rcu);
210 
211 	return 0;
212 }
213 EXPORT_SYMBOL(dma_resv_reserve_shared);
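
/*
 * Reservation sketch, assuming a hypothetical submission that will later
 * attach num_jobs shared fences to the same object: the slots are
 * reserved up front, under the lock, so the subsequent
 * dma_resv_add_shared_fence() calls cannot fail or allocate:
 *
 *	dma_resv_lock(&bo->resv, NULL);
 *	ret = dma_resv_reserve_shared(&bo->resv, num_jobs);
 *	if (ret)
 *		goto err_unlock;
 */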
214 
215 #ifdef CONFIG_DEBUG_MUTEXES
216 /**
217  * dma_resv_reset_shared_max - reset shared fences for debugging
218  * @obj: the dma_resv object to reset
219  *
220  * Reset the number of pre-reserved shared slots to test that drivers do
221  * correct slot allocation using dma_resv_reserve_shared(). See also
222  * &dma_resv_list.shared_max.
223  */
224 void dma_resv_reset_shared_max(struct dma_resv *obj)
225 {
226 	struct dma_resv_list *fences = dma_resv_shared_list(obj);
227 
228 	dma_resv_assert_held(obj);
229 
230 	/* Test shared fence slot reservation */
231 	if (fences)
232 		fences->shared_max = fences->shared_count;
233 }
234 EXPORT_SYMBOL(dma_resv_reset_shared_max);
235 #endif
236 
237 /**
238  * dma_resv_add_shared_fence - Add a fence to a shared slot
239  * @obj: the reservation object
240  * @fence: the shared fence to add
241  *
242  * Add a fence to a shared slot.  obj->lock must be held, and
243  * dma_resv_reserve_shared() must have been called first.
244  */
245 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
246 {
247 	struct dma_resv_list *fobj;
248 	struct dma_fence *old;
249 	unsigned int i, count;
250 
251 	dma_fence_get(fence);
252 
253 	dma_resv_assert_held(obj);
254 
255 	fobj = dma_resv_shared_list(obj);
256 	count = fobj->shared_count;
257 
258 	preempt_disable();
259 	write_seqcount_begin(&obj->seq);
260 
261 	for (i = 0; i < count; ++i) {
262 
263 		old = rcu_dereference_protected(fobj->shared[i],
264 						dma_resv_held(obj));
265 		if (old->context == fence->context ||
266 		    dma_fence_is_signaled(old))
267 			goto replace;
268 	}
269 
270 	BUG_ON(fobj->shared_count >= fobj->shared_max);
271 	old = NULL;
272 	count++;
273 
274 replace:
275 	RCU_INIT_POINTER(fobj->shared[i], fence);
276 	/* pointer update must be visible before we extend the shared_count */
277 	smp_store_mb(fobj->shared_count, count);
278 
279 	write_seqcount_end(&obj->seq);
280 	preempt_enable();
281 	dma_fence_put(old);
282 }
283 EXPORT_SYMBOL(dma_resv_add_shared_fence);
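
/*
 * Fences from the same context replace each other, so in a hypothetical
 * sequence where f1 and f2 come from one timeline (f2 emitted after f1)
 * only the newer fence remains attached; a single reserved slot is
 * enough:
 *
 *	dma_resv_add_shared_fence(&bo->resv, f1);
 *	dma_resv_add_shared_fence(&bo->resv, f2);	// replaces f1's slot
 */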
284 
285 /**
286  * dma_resv_add_excl_fence - Add an exclusive fence.
287  * @obj: the reservation object
288  * @fence: the exclusive fence to add
289  *
290  * Add a fence to the exclusive slot.  The obj->lock must be held.
291  */
292 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
293 {
294 	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
295 	struct dma_resv_list *old;
296 	u32 i = 0;
297 
298 	dma_resv_assert_held(obj);
299 
300 	old = dma_resv_shared_list(obj);
301 	if (old)
302 		i = old->shared_count;
303 
304 	if (fence)
305 		dma_fence_get(fence);
306 
307 	preempt_disable();
308 	write_seqcount_begin(&obj->seq);
309 	/* write_seqcount_begin provides the necessary memory barrier */
310 	RCU_INIT_POINTER(obj->fence_excl, fence);
311 	if (old)
312 		old->shared_count = 0;
313 	write_seqcount_end(&obj->seq);
314 	preempt_enable();
315 
316 	/* inplace update, no shared fences */
317 	while (i--)
318 		dma_fence_put(rcu_dereference_protected(old->shared[i],
319 						dma_resv_held(obj)));
320 
321 	dma_fence_put(old_fence);
322 }
323 EXPORT_SYMBOL(dma_resv_add_excl_fence);
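
/*
 * Write-access sketch: a hypothetical GPU write publishes its fence as
 * the exclusive fence, which also drops all shared fences, so no prior
 * dma_resv_reserve_shared() call is needed:
 *
 *	dma_resv_lock(&bo->resv, NULL);
 *	dma_resv_add_excl_fence(&bo->resv, write_fence);
 *	dma_resv_unlock(&bo->resv);
 */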
324 
325 /**
326  * dma_resv_copy_fences - Copy all fences from src to dst.
327  * @dst: the destination reservation object
328  * @src: the source reservation object
329  *
330  * Copy all fences from src to dst. dst->lock must be held.
331  */
332 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
333 {
334 	struct dma_resv_list *src_list, *dst_list;
335 	struct dma_fence *old, *new;
336 	unsigned int i;
337 
338 	dma_resv_assert_held(dst);
339 
340 	rcu_read_lock();
341 	src_list = dma_resv_shared_list(src);
342 
343 retry:
344 	if (src_list) {
345 		unsigned int shared_count = src_list->shared_count;
346 
347 		rcu_read_unlock();
348 
349 		dst_list = dma_resv_list_alloc(shared_count);
350 		if (!dst_list)
351 			return -ENOMEM;
352 
353 		rcu_read_lock();
354 		src_list = dma_resv_shared_list(src);
355 		if (!src_list || src_list->shared_count > shared_count) {
356 			kfree(dst_list);
357 			goto retry;
358 		}
359 
360 		dst_list->shared_count = 0;
361 		for (i = 0; i < src_list->shared_count; ++i) {
362 			struct dma_fence __rcu **dst;
363 			struct dma_fence *fence;
364 
365 			fence = rcu_dereference(src_list->shared[i]);
366 			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
367 				     &fence->flags))
368 				continue;
369 
370 			if (!dma_fence_get_rcu(fence)) {
371 				dma_resv_list_free(dst_list);
372 				src_list = dma_resv_shared_list(src);
373 				goto retry;
374 			}
375 
376 			if (dma_fence_is_signaled(fence)) {
377 				dma_fence_put(fence);
378 				continue;
379 			}
380 
381 			dst = &dst_list->shared[dst_list->shared_count++];
382 			rcu_assign_pointer(*dst, fence);
383 		}
384 	} else {
385 		dst_list = NULL;
386 	}
387 
388 	new = dma_fence_get_rcu_safe(&src->fence_excl);
389 	rcu_read_unlock();
390 
391 	src_list = dma_resv_shared_list(dst);
392 	old = dma_resv_excl_fence(dst);
393 
394 	preempt_disable();
395 	write_seqcount_begin(&dst->seq);
396 	/* write_seqcount_begin provides the necessary memory barrier */
397 	RCU_INIT_POINTER(dst->fence_excl, new);
398 	RCU_INIT_POINTER(dst->fence, dst_list);
399 	write_seqcount_end(&dst->seq);
400 	preempt_enable();
401 
402 	dma_resv_list_free(src_list);
403 	dma_fence_put(old);
404 
405 	return 0;
406 }
407 EXPORT_SYMBOL(dma_resv_copy_fences);
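
/*
 * Copy sketch, assuming a hypothetical "ghost" object that should inherit
 * the fences of the object it replaces; only the destination needs to be
 * locked, the source is read under RCU:
 *
 *	dma_resv_lock(&ghost->resv, NULL);
 *	ret = dma_resv_copy_fences(&ghost->resv, &bo->resv);
 *	dma_resv_unlock(&ghost->resv);
 */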
408 
409 /**
410  * dma_resv_get_fences - Get an object's shared and exclusive
411  * fences without the update-side lock held
412  * @obj: the reservation object
413  * @pfence_excl: the returned exclusive fence (or NULL)
414  * @pshared_count: the number of shared fences returned
415  * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
416  * the required size, and must be freed by caller)
417  *
418  * Retrieve all fences from the reservation object. If the pointer for the
419  * exclusive fence is not specified, the fence is put into the array of the
420  * shared fences as well. Returns either zero or -ENOMEM.
421  */
422 int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
423 			unsigned int *pshared_count,
424 			struct dma_fence ***pshared)
425 {
426 	struct dma_fence **shared = NULL;
427 	struct dma_fence *fence_excl;
428 	unsigned int shared_count;
429 	int ret = 1;
430 
431 	do {
432 		struct dma_resv_list *fobj;
433 		unsigned int i, seq;
434 		size_t sz = 0;
435 
436 		shared_count = i = 0;
437 
438 		rcu_read_lock();
439 		seq = read_seqcount_begin(&obj->seq);
440 
441 		fence_excl = dma_resv_excl_fence(obj);
442 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
443 			goto unlock;
444 
445 		fobj = dma_resv_shared_list(obj);
446 		if (fobj)
447 			sz += sizeof(*shared) * fobj->shared_max;
448 
449 		if (!pfence_excl && fence_excl)
450 			sz += sizeof(*shared);
451 
452 		if (sz) {
453 			struct dma_fence **nshared;
454 
455 #ifdef __linux__
456 			nshared = krealloc(shared, sz,
457 					   GFP_NOWAIT | __GFP_NOWARN);
458 #else
459 			nshared = kmalloc(sz, GFP_NOWAIT | __GFP_NOWARN);
460 			if (nshared != NULL && shared != NULL)
461 				memcpy(nshared, shared, sz);
462 			if (nshared) {
463 				kfree(shared);
464 				shared = NULL;
465 			}
466 #endif
467 			if (!nshared) {
468 				rcu_read_unlock();
469 
470 				dma_fence_put(fence_excl);
471 				fence_excl = NULL;
472 
473 #ifdef __linux__
474 				nshared = krealloc(shared, sz, GFP_KERNEL);
475 #else
476 				nshared = kmalloc(sz, GFP_KERNEL);
477 				if (nshared != NULL && shared != NULL)
478 					memcpy(nshared, shared, sz);
479 				kfree(shared);
480 				shared = NULL;
481 #endif
482 				if (nshared) {
483 					shared = nshared;
484 					continue;
485 				}
486 
487 				ret = -ENOMEM;
488 				break;
489 			}
490 			shared = nshared;
491 			shared_count = fobj ? fobj->shared_count : 0;
492 			for (i = 0; i < shared_count; ++i) {
493 				shared[i] = rcu_dereference(fobj->shared[i]);
494 				if (!dma_fence_get_rcu(shared[i]))
495 					break;
496 			}
497 		}
498 
499 		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
500 			while (i--)
501 				dma_fence_put(shared[i]);
502 			dma_fence_put(fence_excl);
503 			goto unlock;
504 		}
505 
506 		ret = 0;
507 unlock:
508 		rcu_read_unlock();
509 	} while (ret);
510 
511 	if (pfence_excl)
512 		*pfence_excl = fence_excl;
513 	else if (fence_excl)
514 		shared[shared_count++] = fence_excl;
515 
516 	if (!shared_count) {
517 		kfree(shared);
518 		shared = NULL;
519 	}
520 
521 	*pshared_count = shared_count;
522 	*pshared = shared;
523 	return ret;
524 }
525 EXPORT_SYMBOL_GPL(dma_resv_get_fences);
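
/*
 * Lockless snapshot sketch: take references on all current fences, use
 * them, then drop the references and free the returned array (all names
 * below are illustrative):
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int i, count;
 *	int ret;
 *
 *	ret = dma_resv_get_fences(&bo->resv, &excl, &count, &shared);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(shared[i]);
 *	kfree(shared);
 *	dma_fence_put(excl);	// dma_fence_put(NULL) is a no-op
 */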
526 
527 /**
528  * dma_resv_wait_timeout - Wait on a reservation object's
529  * shared and/or exclusive fences.
530  * @obj: the reservation object
531  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
532  * @intr: if true, do interruptible wait
533  * @timeout: timeout value in jiffies or zero to return immediately
534  *
535  * Callers are not required to hold specific locks, but may hold
536  * dma_resv_lock() already.
537  * RETURNS
538  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
539  * greater than zero on success.
540  */
541 long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
542 			   unsigned long timeout)
543 {
544 	long ret = timeout ? timeout : 1;
545 	unsigned int seq, shared_count;
546 	struct dma_fence *fence;
547 	int i;
548 
549 retry:
550 	shared_count = 0;
551 	seq = read_seqcount_begin(&obj->seq);
552 	rcu_read_lock();
553 	i = -1;
554 
555 	fence = dma_resv_excl_fence(obj);
556 	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
557 		if (!dma_fence_get_rcu(fence))
558 			goto unlock_retry;
559 
560 		if (dma_fence_is_signaled(fence)) {
561 			dma_fence_put(fence);
562 			fence = NULL;
563 		}
564 
565 	} else {
566 		fence = NULL;
567 	}
568 
569 	if (wait_all) {
570 		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
571 
572 		if (fobj)
573 			shared_count = fobj->shared_count;
574 
575 		for (i = 0; !fence && i < shared_count; ++i) {
576 			struct dma_fence *lfence;
577 
578 			lfence = rcu_dereference(fobj->shared[i]);
579 			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
580 				     &lfence->flags))
581 				continue;
582 
583 			if (!dma_fence_get_rcu(lfence))
584 				goto unlock_retry;
585 
586 			if (dma_fence_is_signaled(lfence)) {
587 				dma_fence_put(lfence);
588 				continue;
589 			}
590 
591 			fence = lfence;
592 			break;
593 		}
594 	}
595 
596 	rcu_read_unlock();
597 	if (fence) {
598 		if (read_seqcount_retry(&obj->seq, seq)) {
599 			dma_fence_put(fence);
600 			goto retry;
601 		}
602 
603 		ret = dma_fence_wait_timeout(fence, intr, ret);
604 		dma_fence_put(fence);
605 		if (ret > 0 && wait_all && (i + 1 < shared_count))
606 			goto retry;
607 	}
608 	return ret;
609 
610 unlock_retry:
611 	rcu_read_unlock();
612 	goto retry;
613 }
614 EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
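
/*
 * Wait sketch: block interruptibly until every fence on a hypothetical
 * object has signaled, giving up after one second:
 *
 *	long ret = dma_resv_wait_timeout(&bo->resv, true, true, HZ);
 *
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// timed out
 *	if (ret < 0)
 *		return ret;		// e.g. -ERESTARTSYS
 */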
615 
616 
617 static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
618 {
619 	struct dma_fence *fence, *lfence = passed_fence;
620 	int ret = 1;
621 
622 	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
623 		fence = dma_fence_get_rcu(lfence);
624 		if (!fence)
625 			return -1;
626 
627 		ret = !!dma_fence_is_signaled(fence);
628 		dma_fence_put(fence);
629 	}
630 	return ret;
631 }
632 
633 /**
634  * dma_resv_test_signaled - Test if a reservation object's fences have been
635  * signaled.
636  * @obj: the reservation object
637  * @test_all: if true, test all fences, otherwise only test the exclusive
638  * fence
639  *
640  * Callers are not required to hold specific locks, but may hold
641  * dma_resv_lock() already.
642  * RETURNS
643  * true if all fences are signaled, else false
644  */
645 bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
646 {
647 	struct dma_fence *fence;
648 	unsigned int seq;
649 	int ret;
650 
651 	rcu_read_lock();
652 retry:
653 	ret = true;
654 	seq = read_seqcount_begin(&obj->seq);
655 
656 	if (test_all) {
657 		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
658 		unsigned int i, shared_count;
659 
660 		shared_count = fobj ? fobj->shared_count : 0;
661 		for (i = 0; i < shared_count; ++i) {
662 			fence = rcu_dereference(fobj->shared[i]);
663 			ret = dma_resv_test_signaled_single(fence);
664 			if (ret < 0)
665 				goto retry;
666 			else if (!ret)
667 				break;
668 		}
669 	}
670 
671 	fence = dma_resv_excl_fence(obj);
672 	if (ret && fence) {
673 		ret = dma_resv_test_signaled_single(fence);
674 		if (ret < 0)
675 			goto retry;
676 
677 	}
678 
679 	if (read_seqcount_retry(&obj->seq, seq))
680 		goto retry;
681 
682 	rcu_read_unlock();
683 	return ret;
684 }
685 EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
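
/*
 * Busy-check sketch: a non-blocking poll of a hypothetical object, e.g.
 * to answer an "is this buffer idle?" query without taking obj->lock:
 *
 *	bool idle = dma_resv_test_signaled(&bo->resv, true);
 */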
686 
687 #if IS_ENABLED(CONFIG_LOCKDEP)
688 static int __init dma_resv_lockdep(void)
689 {
690 	struct mm_struct *mm = mm_alloc();
691 	struct ww_acquire_ctx ctx;
692 	struct dma_resv obj;
693 	struct address_space mapping;
694 	int ret;
695 
696 	if (!mm)
697 		return -ENOMEM;
698 
699 	dma_resv_init(&obj);
700 	address_space_init_once(&mapping);
701 
702 	mmap_read_lock(mm);
703 	ww_acquire_init(&ctx, &reservation_ww_class);
704 	ret = dma_resv_lock(&obj, &ctx);
705 	if (ret == -EDEADLK)
706 		dma_resv_lock_slow(&obj, &ctx);
707 	fs_reclaim_acquire(GFP_KERNEL);
708 	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
709 	i_mmap_lock_write(&mapping);
710 	i_mmap_unlock_write(&mapping);
711 #ifdef CONFIG_MMU_NOTIFIER
712 	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
713 	__dma_fence_might_wait();
714 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
715 #else
716 	__dma_fence_might_wait();
717 #endif
718 	fs_reclaim_release(GFP_KERNEL);
719 	ww_mutex_unlock(&obj.lock);
720 	ww_acquire_fini(&ctx);
721 	mmap_read_unlock(mm);
722 
723 	mmput(mm);
724 
725 	return 0;
726 }
727 subsys_initcall(dma_resv_lockdep);
728 #endif
729