/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo.h>

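/*
 * Unlock, in reverse order, every buffer on @list that was reserved
 * before @entry; @entry itself is not touched.
 */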
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					      struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

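/*
 * Drop all reservations on @list: move each buffer to the tail of its
 * LRU list, unlock its reservation object, and finish the ww acquire
 * context if the caller passed one.
 */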
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

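/*
 * Illustrative caller flow (a sketch, not part of TTM): a hypothetical
 * driver submission path built on the three helpers in this file. Only
 * the ttm_eu_*() calls and struct ttm_validate_buffer come from this
 * API; exec_ctx, validate_list, dup_list, my_validate_bo() and
 * job_fence are made-up names for the example.
 *
 *	struct ww_acquire_ctx exec_ctx;
 *	struct ttm_validate_buffer *entry;
 *	LIST_HEAD(validate_list);
 *	LIST_HEAD(dup_list);
 *	int ret;
 *
 *	// Fill validate_list with ttm_validate_buffer entries; set
 *	// entry->bo, and entry->num_shared to the number of shared
 *	// fence slots wanted (0 means the job writes the buffer).
 *
 *	ret = ttm_eu_reserve_buffers(&exec_ctx, &validate_list,
 *				     true, &dup_list);
 *	if (ret)
 *		return ret;
 *
 *	list_for_each_entry(entry, &validate_list, head) {
 *		ret = my_validate_bo(entry->bo);
 *		if (ret) {
 *			ttm_eu_backoff_reservation(&exec_ctx,
 *						   &validate_list);
 *			return ret;
 *		}
 *	}
 *
 *	// job_fence signals completion of the submitted GPU job.
 *	ttm_eu_fence_buffer_objects(&exec_ctx, &validate_list, job_fence);
 */
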
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned int num_fences;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			/* Already reserved under our ticket; move the
			 * buffer to the dups list and keep iterating
			 * from its predecessor.
			 */
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		/* Always reserve at least one fence slot. */
		num_fences = max(entry->num_shared, 1u);
		if (!ret) {
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK)
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);

		if (!ret)
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

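/*
 * Attach @fence to every buffer on @list, as a read fence for entries
 * with num_shared set and as a write fence otherwise, then move the
 * buffers to their LRU tails and drop all reservations.
 */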
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
				   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);