/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
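/*
 * Illustrative sketch only, not part of this file: a hypothetical
 * caller would pair amdgpu_pasid_alloc()/amdgpu_pasid_free() roughly
 * like this, picking a width its IOMMU supports:
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *	if (pasid < 0)
 *		return pasid;
 *	// ...share the address space between GPU and IOMMU...
 *	amdgpu_pasid_free(pasid);
 */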
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

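	/*
	 * Probe the widest range first: for bits == 16 that is
	 * [1 << 15, 1 << 16), then [1 << 14, 1 << 15) and so on, so
	 * that small PASIDs stay available for consumers that can
	 * only handle fewer bits.
	 */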
	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       u32 pasid)
{
	struct amdgpu_pasid_cb *cb;
	struct dma_fence *fence;
	int r;

	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto fallback;

	if (!fence) {
		amdgpu_pasid_free(pasid);
		return;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
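		/*
		 * dma_fence_add_callback() returns an error if the
		 * fence has already signaled; run the callback
		 * directly in that case so the PASID is still freed.
		 */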
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete; as a last resort,
	 * block until all the fences complete.
	 */
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
			      false, MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers used for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/* Check if we need to switch to another set of resources */
static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
					  struct amdgpu_job *job)
{
	return id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size;
}

/* Check if the id is compatible with the job */
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
				   struct amdgpu_job *job)
{
	return id->pd_gpu_addr == job->vm_pd_addr &&
		!amdgpu_vmid_gds_switch_needed(id, job);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @idle: resulting idle VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to find an idle VMID; if none is available, return a fence to
 * wait for in @fence. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_vmid **idle,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;

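	/*
	 * If the fence from the ring's previous VMID wait has not
	 * signaled yet, hand it back rather than building a new
	 * fence array.
	 */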
	if (!dma_fence_is_signaled(ring->vmid_wait)) {
		*fence = dma_fence_get(ring->vmid_wait);
		return 0;
	}

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

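		/*
		 * signal_on_any == true: the array fence signals as
		 * soon as any of the collected fences does, i.e. as
		 * soon as the first VMID becomes idle again.
		 */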
		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		*fence = dma_fence_get(&array->base);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return 0;
	}
	kfree(fences);

	return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id,
				     struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	bool needs_flush = vm->use_cpu_for_update;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	*id = id_mgr->reserved;
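	/*
	 * The reserved ID can be reused directly only if it still
	 * belongs to this VM, matches the job's resource setup and
	 * page directory, and its last flush can be ordered against
	 * the current fence context; otherwise sync and flush again.
	 */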
	if ((*id)->owner != vm->immediate.fence_context ||
	    !amdgpu_vmid_compatible(*id, job) ||
	    (*id)->flushed_updates < updates ||
	    !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* Don't use per engine and per process VMID at the same time */
		if (adev->vm_manager.concurrent_flush)
			ring = NULL;

		/* to prevent one context from being starved by another context */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			*fence = dma_fence_get(tmp);
			return 0;
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
	if (r)
		return r;

	job->vm_needs_flush = needs_flush;
	job->spm_update_needed = true;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

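	/*
	 * Walk the LRU from the most recently used end; an ID this VM
	 * touched recently is the most likely candidate for reuse.
	 */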
	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if (!amdgpu_vmid_compatible(*id, job))
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		if ((*id)->flushed_updates < updates)
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active,
				      &job->base.s_fence->finished);
		if (r)
			return r;

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Allocate an id for the vm; if none can be grabbed right away,
 * return a fence to wait for in @fence.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_job *job, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence);
	if (r || !idle)
		goto error;

	if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
		if (r)
			goto error;

		if (!id) {
			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active,
					      &job->base.s_fence->finished);
			if (r)
				goto error;

			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
	if (job->vm_needs_flush) {
		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
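	/* The hardware VMID is simply the index into the manager's array. */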
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;

	id->gds_base = job->gds_base;
	id->gds_size = job->gds_size;
	id->gws_base = job->gws_base;
	id->gws_size = job->gws_size;
	id->oa_base = job->oa_base;
	id->oa_size = job->oa_size;
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}

/**
 * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
 * @vm: the VM to check
 * @vmhub: the VMHUB which will be used
 *
 * Returns: True if the VM will use a reserved VMID.
 */
bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
{
	return vm->reserved_vmid[vmhub] ||
		(enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)));
}

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);

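	/*
	 * The reservation is refcounted: the first user removes one
	 * ID from the LRU, later users share that same ID.
	 */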
	++id_mgr->reserved_use_count;
	if (!id_mgr->reserved) {
		struct amdgpu_vmid *id;

		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
				      list);
		/* Remove from normal round robin handling */
		list_del_init(&id->list);
		id_mgr->reserved = id;
	}

	mutex_unlock(&id_mgr->lock);
	return 0;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (!--id_mgr->reserved_use_count) {
		/* give the reserved ID back to normal round robin */
		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
		id_mgr->reserved = NULL;
	}

	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		rw_init(&id_mgr->lock, "idmgr");
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		id_mgr->reserved_use_count = 0;

		/* manage only VMIDs not used by KFD */
		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
	/* alloc a default reserved vmid to enforce isolation */
	if (enforce_isolation)
		amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}