/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/export.h>

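/*
 * Release the reservations held on all buffers in the list, returning any
 * buffer that was taken off the LRU back onto it and waking up waiters.
 * The caller must hold the global lru_lock.
 */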
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wakeup(bo);
	}
}

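/*
 * Take every reserved buffer on the list off the LRU, remembering in
 * put_count how many list references each removal dropped so they can be
 * released later by ttm_eu_list_ref_sub().  The caller must hold the
 * global lru_lock.
 */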
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

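/*
 * Drop the list references accumulated in put_count by
 * ttm_eu_del_from_lru_locked() and reset the counters.
 */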
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

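/*
 * Undo a successful or partially completed reservation of the buffers on
 * the list: every buffer is unreserved and returned to the LRU under the
 * global lru_lock.
 */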
void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_backoff_reservation_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access (cpu_writers > 0), all
 * reservations are backed off and -EBUSY is returned, so the caller can
 * wait for the buffer to become free for GPU access.
 *
 * If a buffer is already reserved by another validator, the validator with
 * the higher validation sequence backs off and waits for that buffer to
 * become unreserved.  This prevents deadlocks when two validators reserve
 * the same set of buffers in different orders.
 *
 * A sketch of the typical calling sequence is given at the end of this file.
 */
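/*
 * Illustrative scenario (a sketch, not part of the original comment):
 * validator A holds buffer X and wants Y, while validator B holds Y and
 * wants X.  Without the sequence numbers both would wait forever.  With
 * them, the validator that started later (the higher val_seq) drops all
 * of its reservations, waits for the contended buffer in the slowpath,
 * and then retries the whole list, so the earlier validator can finish.
 */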

int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	val_seq = entry->bo->bdev->val_seq++;

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			ttm_eu_del_from_lru_locked(list);
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = ttm_bo_reserve_nolru(bo, true, false,
						   true, val_seq);
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			if (!ret)
				break;

			if (unlikely(ret != -EAGAIN))
				goto err;

			/* fallthrough */
		case -EAGAIN:
			ttm_eu_backoff_reservation_locked(list);

			/*
			 * temporarily increase sequence number every retry,
			 * to prevent us from seeing our old reservation
			 * sequence when someone else reserved the buffer,
			 * but hasn't updated the seq_valid/seqno members yet.
			 */
			val_seq = entry->bo->bdev->val_seq++;

			lockmgr(&glob->lru_lock, LK_RELEASE);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
			if (unlikely(ret != 0))
				return ret;
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		default:
			goto err;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);

	return 0;

err:
	ttm_eu_backoff_reservation_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

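/*
 * Attach a new sync object to every buffer on the list, release all
 * reservations taken by ttm_eu_reserve_buffers(), and drop the references
 * to the old sync objects outside the locks.
 */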
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
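
/*
 * Typical caller flow -- an illustrative sketch, not part of the original
 * file.  The list setup, the validation step (driver_validate()) and the
 * fence object (driver_fence) are driver-specific and assumed here purely
 * for illustration:
 *
 *	struct list_head head;
 *	struct ttm_validate_buffer *entry;
 *	int ret;
 *
 *	INIT_LIST_HEAD(&head);
 *	// add one struct ttm_validate_buffer per buffer object to &head
 *
 *	ret = ttm_eu_reserve_buffers(&head);
 *	if (ret)
 *		return ret;	// nothing is left reserved on failure
 *
 *	list_for_each_entry(entry, &head, head) {
 *		ret = driver_validate(entry->bo);	// hypothetical helper
 *		if (ret) {
 *			ttm_eu_backoff_reservation(&head);
 *			return ret;
 *		}
 *	}
 *
 *	// submit the command stream, then publish its fence and unreserve
 *	ttm_eu_fence_buffer_objects(&head, driver_fence);
 */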