/*	$NetBSD: amdgpu_ctx.h,v 1.2 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __AMDGPU_CTX_H__
#define __AMDGPU_CTX_H__

#include "amdgpu_ring.h"

struct drm_device;
struct drm_file;
struct amdgpu_fpriv;

#define AMDGPU_MAX_ENTITY_NUM 4

struct amdgpu_ctx_entity {
	uint64_t		sequence;
	struct drm_sched_entity	entity;
	struct dma_fence	*fences[];
};
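
/*
 * Note on fences[]: this is a C99 flexible array member, so each
 * amdgpu_ctx_entity is allocated with trailing storage for a ring of
 * scheduler fences.  The sizing and indexing policy lives in amdgpu_ctx.c,
 * not here; a minimal sketch, assuming a ring of `size' slots indexed by
 * submission sequence number, would look like:
 *
 *	struct dma_fence *fence = centity->fences[seq % size];
 */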

struct amdgpu_ctx {
	struct kref			refcount;
	struct amdgpu_device		*adev;
	unsigned			reset_counter;
	unsigned			reset_counter_query;
	uint32_t			vram_lost_counter;
	spinlock_t			ring_lock;
	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
	bool				preamble_presented;
	enum drm_sched_priority		init_priority;
	enum drm_sched_priority		override_priority;
	struct mutex			lock;
	atomic_t			guilty;
	unsigned long			ras_counter_ce;
	unsigned long			ras_counter_ue;
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device	*adev;
	struct mutex		lock;
	/* protected by lock */
	struct idr		ctx_handles;
};
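
/*
 * Illustrative sketch, not part of this header: per-file contexts live in
 * mgr->ctx_handles, and a lookup by user handle is expected to take the
 * manager lock around the idr access, roughly:
 *
 *	mutex_lock(&mgr->lock);
 *	ctx = idr_find(&mgr->ctx_handles, id);
 *	if (ctx)
 *		kref_get(&ctx->refcount);
 *	mutex_unlock(&mgr->lock);
 *
 * The canonical form of this pattern is amdgpu_ctx_get() below.
 */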

extern const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM];

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
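
/*
 * Typical reference lifecycle, as a sketch: amdgpu_ctx_get() returns a
 * referenced context (or NULL for an unknown handle), and every successful
 * get must be balanced by amdgpu_ctx_put().  `fpriv' and `id' here are
 * placeholders for whatever the caller has at hand:
 *
 *	struct amdgpu_ctx *ctx = amdgpu_ctx_get(fpriv, id);
 *
 *	if (ctx == NULL)
 *		return -EINVAL;
 *	... use ctx ...
 *	amdgpu_ctx_put(ctx);
 */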

int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity);
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq);
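
/*
 * Sketch of the fence round trip, assuming the caller already holds a
 * context reference (all names below are illustrative):
 *
 *	struct drm_sched_entity *entity;
 *	struct dma_fence *other;
 *	uint64_t seq;
 *	int r;
 *
 *	r = amdgpu_ctx_get_entity(ctx, AMDGPU_HW_IP_GFX, 0, 0, &entity);
 *	if (r)
 *		return r;
 *	amdgpu_ctx_add_fence(ctx, entity, fence, &seq);
 *	...
 *	other = amdgpu_ctx_get_fence(ctx, entity, seq);
 *	dma_fence_put(other);
 */
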
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
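
/*
 * Expected manager lifecycle, sketched from the callers rather than
 * guaranteed by this header: initialize the manager when the file private
 * is created, then on teardown flush the remaining entities before tearing
 * the entities and the manager down.  `fpriv->ctx_mgr' and `timeout' are
 * placeholders:
 *
 *	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
 *	...
 *	amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr, timeout);
 *	amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
 *	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
 */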

void amdgpu_ctx_init_sched(struct amdgpu_device *adev);


#endif