/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/export.h>
#include <linux/wait.h>

static void ttm_eu_backoff_reservation_locked(struct list_head *list,
					      struct ww_acquire_ctx *ticket)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_unreserve_ticket_locked(bo, ticket);
			entry->removed = false;
		} else {
			atomic_set(&bo->reserved, 0);
			wake_up_all(&bo->event_queue);
		}
	}
}

static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_backoff_reservation_locked(list, ticket);
	ww_acquire_fini(ticket);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
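
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * that fails somewhere between reservation and fencing is expected to
 * release every reservation it took with ttm_eu_backoff_reservation().
 * Note that ttm_eu_reserve_buffers() cleans up after itself on failure,
 * so the caller only backs off after its own, later failures.  The
 * names validate_list and driver_validate_buffers() are hypothetical
 * placeholders for driver-private code:
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
 *	if (ret != 0)
 *		return ret;
 *
 *	ret = driver_validate_buffers(&validate_list);
 *	if (ret != 0) {
 *		ttm_eu_backoff_reservation(&ticket, &validate_list);
 *		return ret;
 *	}
 */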

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	ww_acquire_init(ticket, &reservation_ww_class);
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		int owned;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			ttm_eu_del_from_lru_locked(list);
			owned = lockstatus(&glob->lru_lock, curthread);
			if (owned == LK_EXCLUSIVE)
				lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = ttm_bo_reserve_nolru(bo, true, false,
						   true, ticket);
			if (owned == LK_EXCLUSIVE)
				lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			if (!ret)
				break;

			if (unlikely(ret != -EAGAIN))
				goto err;

			/* fallthrough */
		case -EAGAIN:
			ttm_eu_backoff_reservation_locked(list, ticket);
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket);
			if (unlikely(ret != 0))
				goto err_fini;

			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		default:
			goto err;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	ww_acquire_done(ticket);
	ttm_eu_del_from_lru_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
	return 0;

err:
	ttm_eu_backoff_reservation_locked(list, ticket);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
err_fini:
	ww_acquire_done(ticket);
	ww_acquire_fini(ticket);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
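
/*
 * Illustrative usage sketch (not part of the original file): callers
 * hand ttm_eu_reserve_buffers() a list of struct ttm_validate_buffer
 * entries.  Only ->bo and the ->head linkage need to be set up by the
 * caller; ->reserved, ->put_count and ->removed are reinitialized by
 * ttm_eu_reserve_buffers() itself.  The names items[], MAX_ITEMS,
 * count and lookup_buffer_object() are hypothetical:
 *
 *	struct ttm_validate_buffer items[MAX_ITEMS];
 *	struct list_head validate_list;
 *	unsigned int i;
 *
 *	INIT_LIST_HEAD(&validate_list);
 *	for (i = 0; i < count; i++) {
 *		items[i].bo = lookup_buffer_object(i);
 *		list_add_tail(&items[i].head, &validate_list);
 *	}
 *	ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
 */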

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_ticket_locked(bo, ticket);
		entry->reserved = false;
	}
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ww_acquire_fini(ticket);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
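
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * submission ends by fencing and unreserving the whole list in one
 * call.  ttm_eu_fence_buffer_objects() takes one sync_obj reference per
 * buffer through driver->sync_obj_ref() and drops each buffer's
 * previous sync object afterwards.  driver_submit_commands() and fence
 * are hypothetical placeholders for the driver's command submission and
 * the sync object it produces:
 *
 *	void *fence;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
 *	if (ret != 0)
 *		return ret;
 *
 *	fence = driver_submit_commands(&validate_list);
 *	ttm_eu_fence_buffer_objects(&ticket, &validate_list, fence);
 */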