/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * Release every reservation taken so far. Buffers that were taken off the
 * LRU lists are put back and unlocked via the ticketed unreserve; buffers
 * that were only locked are simply unlocked. Caller must hold the global
 * LRU lock.
 */
static void ttm_eu_backoff_reservation_locked(struct list_head *list,
					      struct ww_acquire_ctx *ticket)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_unreserve_ticket_locked(bo, ticket);
			entry->removed = false;
		} else {
			ww_mutex_unlock(&bo->resv->lock);
		}
	}
}

/*
 * Take every reserved buffer off the LRU lists, recording in put_count how
 * many list references must be dropped later, once the LRU lock has been
 * released. Caller must hold the global LRU lock.
 */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

/*
 * Drop the list references accumulated by ttm_eu_del_from_lru_locked().
 * Intended to be called after the LRU lock has been dropped.
 */
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_backoff_reservation_locked(list, ticket);
	ww_acquire_fini(ticket);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
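/*
 * Illustrative sketch only: how a driver's command-submission path might
 * pair ttm_eu_reserve_buffers() with ttm_eu_backoff_reservation() when
 * validation fails after a successful reserve. "driver_validate_buffers"
 * is a hypothetical driver hook, not a function defined by TTM.
 *
 *	struct ww_acquire_ctx ticket;
 *	struct list_head val_list;	// populated ttm_validate_buffer list
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &val_list);
 *	if (ret)
 *		return ret;	// nothing is left reserved on failure
 *
 *	ret = driver_validate_buffers(&val_list);	// hypothetical
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &val_list);
 *		return ret;
 *	}
 */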
/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	ww_acquire_init(ticket, &reservation_ww_class);

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);

		if (ret == -EDEADLK) {
			/* uh oh, we lost out, drop every reservation and try
			 * to only reserve this buffer, then start over if
			 * this succeeds.
			 */
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			ttm_eu_backoff_reservation_locked(list, ticket);
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ttm_eu_list_ref_sub(list);
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
			if (unlikely(ret != 0)) {
				if (ret == -EINTR)
					ret = -ERESTARTSYS;
				goto err_fini;
			}

			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		} else if (ret)
			goto err;

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	ww_acquire_done(ticket);
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_del_from_lru_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
	return 0;

err:
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_backoff_reservation_locked(list, ticket);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
err_fini:
	ww_acquire_done(ticket);
	ww_acquire_fini(ticket);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

/*
 * Attach a new sync object to every buffer on the list and unreserve
 * them. References to the buffers' previous sync objects are dropped
 * only after all locks have been released.
 */
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_ticket_locked(bo, ticket);
		entry->reserved = false;
	}
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ww_acquire_fini(ticket);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);