/*	$OpenBSD: ttm_execbuf_util.c,v 1.3 2015/04/06 05:35:29 jsg Exp $	*/
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <dev/pci/drm/ttm/ttm_execbuf_util.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>

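/*
 * Undo the reservations on all buffers in @list while the global LRU
 * lock is held: put each reserved buffer back on the LRU if it had been
 * taken off, clear its reserved state and wake up any waiters.
 */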
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}
}

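/*
 * Take every reserved buffer on @list off the LRU lists, remembering in
 * @put_count how many LRU list references have to be dropped later.
 */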
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

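/*
 * Drop the LRU list references recorded in @put_count by
 * ttm_eu_del_from_lru_locked() and reset the counters.
 */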
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

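/*
 * Called with the LRU lock held when a buffer turns out to be reserved
 * by someone else: temporarily drop the lock, wait interruptibly for the
 * buffer to become unreserved, and back off the whole list if the wait
 * fails.
 */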
static int ttm_eu_wait_unreserved_locked(struct list_head *list,
					 struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ret = ttm_bo_wait_unreserved(bo, true);
	spin_lock(&glob->lru_lock);
	if (unlikely(ret != 0))
		ttm_eu_backoff_reservation_locked(list);
	return ret;
}

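/*
 * Release the reservations taken on all buffers in @list, putting the
 * buffers back on the LRU lists where necessary.
 */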
void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off the
 * whole list and return -EBUSY; the buffer cannot be handed to the GPU
 * until the CPU access has finished.
 *
 * If a buffer is already reserved for another validation, the validator
 * with the highest validation sequence backs off and waits for that
 * buffer to become unreserved. This prevents deadlocks when validating
 * multiple buffers in different orders.
 *
 * A usage sketch for these helpers is included in a comment at the end
 * of this file.
 */

int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

retry:
	spin_lock(&glob->lru_lock);
	val_seq = entry->bo->bdev->val_seq++;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

retry_this_bo:
		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			ret = ttm_eu_wait_unreserved_locked(list, bo);
			if (unlikely(ret != 0)) {
				spin_unlock(&glob->lru_lock);
				ttm_eu_list_ref_sub(list);
				return ret;
			}
			goto retry_this_bo;
		case -EAGAIN:
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_unreserved(bo, true);
			if (unlikely(ret != 0))
				return ret;
			goto retry;
		default:
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			return ret;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			return -EBUSY;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

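/*
 * Attach @sync_obj as the new fence of every buffer on @list, unreserve
 * the buffers and drop the references to the fences they carried before.
 */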
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
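
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * execbuf path collects the buffers referenced by a command stream into
 * a list of struct ttm_validate_buffer, reserves them all, submits the
 * commands and then fences the whole list.  The identifiers prefixed
 * with "example_" below are hypothetical placeholders, not functions
 * provided by TTM or by this file.
 *
 *	struct list_head head;
 *	struct ttm_validate_buffer val;
 *	int ret;
 *
 *	INIT_LIST_HEAD(&head);
 *	val.bo = bo;				// buffer used by the job
 *	list_add_tail(&val.head, &head);
 *
 *	ret = ttm_eu_reserve_buffers(&head);	// reserves all or cleans up
 *	if (ret)
 *		return ret;
 *
 *	ret = example_submit_commands();	// hypothetical submission step
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&head);
 *		return ret;
 *	}
 *
 *	// attach the fence and unreserve all buffers in one go
 *	ttm_eu_fence_buffer_objects(&head, example_fence);
 */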