/*	$NetBSD: ttm_execbuf_util.c,v 1.5 2020/02/14 14:34:59 maya Exp $	*/

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_execbuf_util.c,v 1.5 2020/02/14 14:34:59 maya Exp $");

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

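/*
 * ttm_eu_backoff_reservation_reverse - undo a partially completed
 * reservation pass: walk the validation list backwards from @entry
 * (exclusive) and unreserve every buffer reserved before the failure.
 */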
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					      struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		__ttm_bo_unreserve(bo);
	}
}

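/*
 * ttm_eu_del_from_lru_locked - take every buffer on the validation list
 * off the LRU list.  Caller must hold the global LRU spinlock; the
 * references dropped by ttm_bo_del_from_lru() are released in a batch
 * through ttm_bo_list_ref_sub().
 */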
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned put_count = ttm_bo_del_from_lru(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);
	}
}

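/*
 * ttm_eu_backoff_reservation - release a whole validation list: put each
 * buffer back on the LRU list, unreserve it, and drop the ww_acquire
 * ticket (if one was used).
 */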
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
				       ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			__ttm_bo_unreserve(bo);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
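			/*
			 * -EALREADY: this buffer is already reserved under
			 * our ticket, i.e. it appears twice in the list.
			 * Park the duplicate on the caller-supplied dups
			 * list, stepping the iterator back first so the
			 * walk stays valid after the removal.
			 */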
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->shared)
				continue;

			ret = reservation_object_reserve_shared(bo->resv);
			if (!ret)
				continue;
		}

		/*
		 * We lost out: drop every reservation taken so far, try
		 * to reserve only this buffer, and start the list over
		 * if that succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

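		/*
		 * -EDEADLK means the ww_mutex machinery picked us as the
		 * deadlock victim.  Acquire the contended lock via the
		 * slow path, sleeping until it is free, before retrying
		 * the whole list.
		 */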
		if (ret == -EDEADLK && intr) {
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
		} else if (ret == -EDEADLK) {
			ww_mutex_lock_slow(&bo->resv->lock, ticket);
			ret = 0;
		}

		if (!ret && entry->shared)
			ret = reservation_object_reserve_shared(bo->resv);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/*
		 * Move this item to the front of the list so the loop
		 * resumes with the buffers that are still unreserved,
		 * without any extra bookkeeping.
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
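
/*
 * Typical call sequence, as an illustrative sketch only: "placement" and
 * "fence" are assumptions standing in for driver state, and error handling
 * is reduced to the essentials.
 *
 *	struct ww_acquire_ctx ticket;
 *	struct list_head list;
 *	struct ttm_validate_buffer *entry;
 *	int ret;
 *
 *	Fill "list" with struct ttm_validate_buffer entries, then:
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 *	if (ret)
 *		return ret;
 *
 *	list_for_each_entry(entry, &list, head) {
 *		ret = ttm_bo_validate(entry->bo, &placement, true, false);
 *		if (ret) {
 *			ttm_eu_backoff_reservation(&ticket, &list);
 *			return ret;
 *		}
 *	}
 *
 *	Submit the GPU work that produces "fence", then:
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 */
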
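/*
 * ttm_eu_fence_buffer_objects - attach @fence to every buffer on the
 * validation list (shared or exclusive, according to each entry), put
 * the buffers back on the LRU list, unreserve them, and drop the
 * ww_acquire ticket (if any).
 */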
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, struct fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver __unused;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->shared)
			reservation_object_add_shared_fence(bo->resv, fence);
		else
			reservation_object_add_excl_fence(bo->resv, fence);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);