xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c (revision 1ad61ae0a79a724d2d3ec69e69c8e1d1ff6b53a0)
1 /*
2  * Copyright 2008 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  * DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Jerome Glisse <glisse@freedesktop.org>
26  */
27 
28 #include <linux/file.h>
29 #include <linux/pagemap.h>
30 #include <linux/sync_file.h>
31 #include <linux/dma-buf.h>
32 
33 #include <drm/amdgpu_drm.h>
34 #include <drm/drm_syncobj.h>
35 #include "amdgpu_cs.h"
36 #include "amdgpu.h"
37 #include "amdgpu_trace.h"
38 #include "amdgpu_gmc.h"
39 #include "amdgpu_gem.h"
40 #include "amdgpu_ras.h"
41 
42 static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
43 				 struct amdgpu_device *adev,
44 				 struct drm_file *filp,
45 				 union drm_amdgpu_cs *cs)
46 {
47 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
48 
49 	if (cs->in.num_chunks == 0)
50 		return -EINVAL;
51 
52 	memset(p, 0, sizeof(*p));
53 	p->adev = adev;
54 	p->filp = filp;
55 
56 	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
57 	if (!p->ctx)
58 		return -EINVAL;
59 
60 	if (atomic_read(&p->ctx->guilty)) {
61 		amdgpu_ctx_put(p->ctx);
62 		return -ECANCELED;
63 	}
64 	return 0;
65 }
66 
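/*
 * Map an IB chunk onto a gang job slot: the chunk's HW IP type, instance and
 * ring select a scheduler entity. If that entity already occupies a slot in
 * p->entities[], its index is returned; otherwise the gang is grown by one
 * entry, up to AMDGPU_CS_GANG_SIZE. Returns the slot index or a negative
 * error code.
 */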
67 static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
68 			     struct drm_amdgpu_cs_chunk_ib *chunk_ib)
69 {
70 	struct drm_sched_entity *entity;
71 	unsigned int i;
72 	int r;
73 
74 	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
75 				  chunk_ib->ip_instance,
76 				  chunk_ib->ring, &entity);
77 	if (r)
78 		return r;
79 
80 	/*
81 	 * Abort if there is no run queue associated with this entity.
82 	 * Possibly because of a disabled HW IP.
83 	 */
84 	if (entity->rq == NULL)
85 		return -EINVAL;
86 
87 	/* Check if we can add this IB to some existing job */
88 	for (i = 0; i < p->gang_size; ++i)
89 		if (p->entities[i] == entity)
90 			return i;
91 
92 	/* If not, increase the gang size if possible */
93 	if (i == AMDGPU_CS_GANG_SIZE)
94 		return -EINVAL;
95 
96 	p->entities[i] = entity;
97 	p->gang_size = i + 1;
98 	return i;
99 }
100 
101 static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
102 			   struct drm_amdgpu_cs_chunk_ib *chunk_ib,
103 			   unsigned int *num_ibs)
104 {
105 	int r;
106 
107 	r = amdgpu_cs_job_idx(p, chunk_ib);
108 	if (r < 0)
109 		return r;
110 
111 	++(num_ibs[r]);
112 	p->gang_leader_idx = r;
113 	return 0;
114 }
115 
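/*
 * First-pass handling of the user fence chunk: look up the GEM object, take a
 * reference on it in p->uf_entry and remember the fence offset. The BO must
 * be exactly one page in size, the offset must leave room for the 8-byte
 * fence value, and userptr BOs are rejected.
 */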
116 static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
117 				   struct drm_amdgpu_cs_chunk_fence *data,
118 				   uint32_t *offset)
119 {
120 	struct drm_gem_object *gobj;
121 	struct amdgpu_bo *bo;
122 	unsigned long size;
123 
124 	gobj = drm_gem_object_lookup(p->filp, data->handle);
125 	if (gobj == NULL)
126 		return -EINVAL;
127 
128 	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
129 	p->uf_entry.priority = 0;
130 	p->uf_entry.tv.bo = &bo->tbo;
131 	drm_gem_object_put(gobj);
132 
133 	size = amdgpu_bo_size(bo);
134 	if (size != PAGE_SIZE || data->offset > (size - 8))
135 		return -EINVAL;
136 
137 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
138 		return -EINVAL;
139 
140 	*offset = data->offset;
141 	return 0;
142 }
143 
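/*
 * First-pass handling of a BO_HANDLES chunk: turn the user-supplied handle
 * array into bo_list entries and create p->bo_list from them.
 */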
144 static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
145 				   struct drm_amdgpu_bo_list_in *data)
146 {
147 	struct drm_amdgpu_bo_list_entry *info;
148 	int r;
149 
150 	r = amdgpu_bo_create_list_entry_array(data, &info);
151 	if (r)
152 		return r;
153 
154 	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
155 				  &p->bo_list);
156 	if (r)
157 		goto error_free;
158 
159 	kvfree(info);
160 	return 0;
161 
162 error_free:
163 	kvfree(info);
164 
165 	return r;
166 }
167 
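/*
 * Layout of the chunk data consumed here (sketch, following the
 * drm/amdgpu_drm.h UAPI): cs->in.chunks is a user pointer to an array of
 * cs->in.num_chunks user pointers, each referencing a
 *
 *	struct drm_amdgpu_cs_chunk {
 *		__u32 chunk_id;		AMDGPU_CHUNK_ID_IB, _FENCE, _BO_HANDLES, ...
 *		__u32 length_dw;	payload size in dwords
 *		__u64 chunk_data;	user pointer to the payload
 *	};
 *
 * Each payload is copied into p->chunks[i].kdata below.
 */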
168 /* Copy the data from userspace and go over it the first time */
169 static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
170 			   union drm_amdgpu_cs *cs)
171 {
172 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
173 	unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
174 	struct amdgpu_vm *vm = &fpriv->vm;
175 	uint64_t *chunk_array_user;
176 	uint64_t *chunk_array;
177 	uint32_t uf_offset = 0;
178 	size_t size;
179 	int ret;
180 	int i;
181 
182 	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
183 				     GFP_KERNEL);
184 	if (!chunk_array)
185 		return -ENOMEM;
186 
187 	/* get chunks */
188 	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
189 	if (copy_from_user(chunk_array, chunk_array_user,
190 			   sizeof(uint64_t)*cs->in.num_chunks)) {
191 		ret = -EFAULT;
192 		goto free_chunk;
193 	}
194 
195 	p->nchunks = cs->in.num_chunks;
196 	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
197 			    GFP_KERNEL);
198 	if (!p->chunks) {
199 		ret = -ENOMEM;
200 		goto free_chunk;
201 	}
202 
203 	for (i = 0; i < p->nchunks; i++) {
204 		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
205 		struct drm_amdgpu_cs_chunk user_chunk;
206 		uint32_t __user *cdata;
207 
208 		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
209 		if (copy_from_user(&user_chunk, chunk_ptr,
210 				       sizeof(struct drm_amdgpu_cs_chunk))) {
211 			ret = -EFAULT;
212 			i--;
213 			goto free_partial_kdata;
214 		}
215 		p->chunks[i].chunk_id = user_chunk.chunk_id;
216 		p->chunks[i].length_dw = user_chunk.length_dw;
217 
218 		size = p->chunks[i].length_dw;
219 		cdata = u64_to_user_ptr(user_chunk.chunk_data);
220 
221 		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
222 						    GFP_KERNEL);
223 		if (p->chunks[i].kdata == NULL) {
224 			ret = -ENOMEM;
225 			i--;
226 			goto free_partial_kdata;
227 		}
228 		size *= sizeof(uint32_t);
229 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
230 			ret = -EFAULT;
231 			goto free_partial_kdata;
232 		}
233 
234 		/* Assume the worst on the following checks */
235 		ret = -EINVAL;
236 		switch (p->chunks[i].chunk_id) {
237 		case AMDGPU_CHUNK_ID_IB:
238 			if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
239 				goto free_partial_kdata;
240 
241 			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
242 			if (ret)
243 				goto free_partial_kdata;
244 			break;
245 
246 		case AMDGPU_CHUNK_ID_FENCE:
247 			if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
248 				goto free_partial_kdata;
249 
250 			ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
251 						      &uf_offset);
252 			if (ret)
253 				goto free_partial_kdata;
254 			break;
255 
256 		case AMDGPU_CHUNK_ID_BO_HANDLES:
257 			if (size < sizeof(struct drm_amdgpu_bo_list_in))
258 				goto free_partial_kdata;
259 
260 			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
261 			if (ret)
262 				goto free_partial_kdata;
263 			break;
264 
265 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
266 		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
267 		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
268 		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
269 		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
270 		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
271 			break;
272 
273 		default:
274 			goto free_partial_kdata;
275 		}
276 	}
277 
278 	if (!p->gang_size) {
279 		ret = -EINVAL;
280 		goto free_all_kdata;
281 	}
282 
283 	for (i = 0; i < p->gang_size; ++i) {
284 		ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
285 		if (ret)
286 			goto free_all_kdata;
287 
288 		ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
289 					 &fpriv->vm);
290 		if (ret)
291 			goto free_all_kdata;
292 	}
293 	p->gang_leader = p->jobs[p->gang_leader_idx];
294 
295 	if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
296 		ret = -ECANCELED;
297 		goto free_all_kdata;
298 	}
299 
300 	if (p->uf_entry.tv.bo)
301 		p->gang_leader->uf_addr = uf_offset;
302 	kvfree(chunk_array);
303 
304 	/* Use this opportunity to fill in task info for the vm */
305 	amdgpu_vm_set_task_info(vm);
306 
307 	return 0;
308 
309 free_all_kdata:
310 	i = p->nchunks - 1;
311 free_partial_kdata:
312 	for (; i >= 0; i--)
313 		kvfree(p->chunks[i].kdata);
314 	kvfree(p->chunks);
315 	p->chunks = NULL;
316 	p->nchunks = 0;
317 free_chunk:
318 	kvfree(chunk_array);
319 
320 	return ret;
321 }
322 
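/*
 * Second-pass handling of an IB chunk: pick the gang job the IB belongs to,
 * allocate the next amdgpu_ib in that job, enforce that at most one
 * preemptible CE and one preemptible DE IB are submitted per GFX CS, and
 * record the IB's GPU VA, size and flags.
 */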
323 static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
324 			   struct amdgpu_cs_chunk *chunk,
325 			   unsigned int *ce_preempt,
326 			   unsigned int *de_preempt)
327 {
328 	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
329 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
330 	struct amdgpu_vm *vm = &fpriv->vm;
331 	struct amdgpu_ring *ring;
332 	struct amdgpu_job *job;
333 	struct amdgpu_ib *ib;
334 	int r;
335 
336 	r = amdgpu_cs_job_idx(p, chunk_ib);
337 	if (r < 0)
338 		return r;
339 
340 	job = p->jobs[r];
341 	ring = amdgpu_job_ring(job);
342 	ib = &job->ibs[job->num_ibs++];
343 
344 	/* MM engine doesn't support user fences */
345 	if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
346 		return -EINVAL;
347 
348 	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
349 	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
350 		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
351 			(*ce_preempt)++;
352 		else
353 			(*de_preempt)++;
354 
355 		/* Each GFX command submission allows at most one
356 		 * preemptible IB each for CE and DE */
357 		if (*ce_preempt > 1 || *de_preempt > 1)
358 			return -EINVAL;
359 	}
360 
361 	if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
362 		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
363 
364 	r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
365 			   chunk_ib->ib_bytes : 0,
366 			   AMDGPU_IB_POOL_DELAYED, ib);
367 	if (r) {
368 		DRM_ERROR("Failed to get ib!\n");
369 		return r;
370 	}
371 
372 	ib->gpu_addr = chunk_ib->va_start;
373 	ib->length_dw = chunk_ib->ib_bytes / 4;
374 	ib->flags = chunk_ib->flags;
375 	return 0;
376 }
377 
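/*
 * Resolve a DEPENDENCIES or SCHEDULED_DEPENDENCIES chunk: look up each fence
 * by context, entity and handle and add it to the gang leader's sync object.
 * For scheduled dependencies only the "scheduled" fence of the scheduler job
 * is waited on instead of its "finished" fence.
 */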
378 static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
379 				     struct amdgpu_cs_chunk *chunk)
380 {
381 	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
382 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
383 	unsigned num_deps;
384 	int i, r;
385 
386 	num_deps = chunk->length_dw * 4 /
387 		sizeof(struct drm_amdgpu_cs_chunk_dep);
388 
389 	for (i = 0; i < num_deps; ++i) {
390 		struct amdgpu_ctx *ctx;
391 		struct drm_sched_entity *entity;
392 		struct dma_fence *fence;
393 
394 		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
395 		if (ctx == NULL)
396 			return -EINVAL;
397 
398 		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
399 					  deps[i].ip_instance,
400 					  deps[i].ring, &entity);
401 		if (r) {
402 			amdgpu_ctx_put(ctx);
403 			return r;
404 		}
405 
406 		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
407 		amdgpu_ctx_put(ctx);
408 
409 		if (IS_ERR(fence))
410 			return PTR_ERR(fence);
411 		else if (!fence)
412 			continue;
413 
414 		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
415 			struct drm_sched_fence *s_fence;
416 			struct dma_fence *old = fence;
417 
418 			s_fence = to_drm_sched_fence(fence);
419 			fence = dma_fence_get(&s_fence->scheduled);
420 			dma_fence_put(old);
421 		}
422 
423 		r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
424 		dma_fence_put(fence);
425 		if (r)
426 			return r;
427 	}
428 	return 0;
429 }
430 
431 static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
432 					 uint32_t handle, u64 point,
433 					 u64 flags)
434 {
435 	struct dma_fence *fence;
436 	int r;
437 
438 	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
439 	if (r) {
440 		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
441 			  handle, point, r);
442 		return r;
443 	}
444 
445 	r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
446 	dma_fence_put(fence);
447 
448 	return r;
449 }
450 
451 static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
452 				   struct amdgpu_cs_chunk *chunk)
453 {
454 	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
455 	unsigned num_deps;
456 	int i, r;
457 
458 	num_deps = chunk->length_dw * 4 /
459 		sizeof(struct drm_amdgpu_cs_chunk_sem);
460 	for (i = 0; i < num_deps; ++i) {
461 		r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
462 		if (r)
463 			return r;
464 	}
465 
466 	return 0;
467 }
468 
469 static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
470 					      struct amdgpu_cs_chunk *chunk)
471 {
472 	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
473 	unsigned num_deps;
474 	int i, r;
475 
476 	num_deps = chunk->length_dw * 4 /
477 		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
478 	for (i = 0; i < num_deps; ++i) {
479 		r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
480 						  syncobj_deps[i].point,
481 						  syncobj_deps[i].flags);
482 		if (r)
483 			return r;
484 	}
485 
486 	return 0;
487 }
488 
489 static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
490 				    struct amdgpu_cs_chunk *chunk)
491 {
492 	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
493 	unsigned num_deps;
494 	int i;
495 
496 	num_deps = chunk->length_dw * 4 /
497 		sizeof(struct drm_amdgpu_cs_chunk_sem);
498 
499 	if (p->post_deps)
500 		return -EINVAL;
501 
502 	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
503 				     GFP_KERNEL);
504 	p->num_post_deps = 0;
505 
506 	if (!p->post_deps)
507 		return -ENOMEM;
508 
509 
510 	for (i = 0; i < num_deps; ++i) {
511 		p->post_deps[i].syncobj =
512 			drm_syncobj_find(p->filp, deps[i].handle);
513 		if (!p->post_deps[i].syncobj)
514 			return -EINVAL;
515 		p->post_deps[i].chain = NULL;
516 		p->post_deps[i].point = 0;
517 		p->num_post_deps++;
518 	}
519 
520 	return 0;
521 }
522 
523 static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
524 						struct amdgpu_cs_chunk *chunk)
525 {
526 	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
527 	unsigned num_deps;
528 	int i;
529 
530 	num_deps = chunk->length_dw * 4 /
531 		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
532 
533 	if (p->post_deps)
534 		return -EINVAL;
535 
536 	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
537 				     GFP_KERNEL);
538 	p->num_post_deps = 0;
539 
540 	if (!p->post_deps)
541 		return -ENOMEM;
542 
543 	for (i = 0; i < num_deps; ++i) {
544 		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
545 
546 		dep->chain = NULL;
547 		if (syncobj_deps[i].point) {
548 			dep->chain = dma_fence_chain_alloc();
549 			if (!dep->chain)
550 				return -ENOMEM;
551 		}
552 
553 		dep->syncobj = drm_syncobj_find(p->filp,
554 						syncobj_deps[i].handle);
555 		if (!dep->syncobj) {
556 			dma_fence_chain_free(dep->chain);
557 			return -EINVAL;
558 		}
559 		dep->point = syncobj_deps[i].point;
560 		p->num_post_deps++;
561 	}
562 
563 	return 0;
564 }
565 
566 static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
567 {
568 	unsigned int ce_preempt = 0, de_preempt = 0;
569 	int i, r;
570 
571 	for (i = 0; i < p->nchunks; ++i) {
572 		struct amdgpu_cs_chunk *chunk;
573 
574 		chunk = &p->chunks[i];
575 
576 		switch (chunk->chunk_id) {
577 		case AMDGPU_CHUNK_ID_IB:
578 			r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
579 			if (r)
580 				return r;
581 			break;
582 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
583 		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
584 			r = amdgpu_cs_p2_dependencies(p, chunk);
585 			if (r)
586 				return r;
587 			break;
588 		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
589 			r = amdgpu_cs_p2_syncobj_in(p, chunk);
590 			if (r)
591 				return r;
592 			break;
593 		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
594 			r = amdgpu_cs_p2_syncobj_out(p, chunk);
595 			if (r)
596 				return r;
597 			break;
598 		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
599 			r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
600 			if (r)
601 				return r;
602 			break;
603 		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
604 			r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
605 			if (r)
606 				return r;
607 			break;
608 		}
609 	}
610 
611 	return 0;
612 }
613 
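/*
 * Worked example for the two conversion helpers below (illustrative values,
 * not taken from real hardware): with adev->mm_stats.log2_max_MBps == 6,
 * i.e. a 64 MB/s move budget, 250000 accumulated us convert to
 * 250000 << 6 = 16000000 bytes that TTM may move; bytes_to_us() is simply
 * the inverse shift.
 */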
614 /* Convert microseconds to bytes. */
615 static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
616 {
617 	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
618 		return 0;
619 
620 	/* Since accum_us is incremented by a million per second, just
621 	 * multiply it by the number of MB/s to get the number of bytes.
622 	 */
623 	return us << adev->mm_stats.log2_max_MBps;
624 }
625 
626 static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
627 {
628 	if (!adev->mm_stats.log2_max_MBps)
629 		return 0;
630 
631 	return bytes >> adev->mm_stats.log2_max_MBps;
632 }
633 
634 /* Returns how many bytes TTM can move right now. If no bytes can be moved,
635  * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
636  * which means it can go over the threshold once. If that happens, the driver
637  * will be in debt and no other buffer migrations can be done until that debt
638  * is repaid.
639  *
640  * This approach allows moving a buffer of any size (it's important to allow
641  * that).
642  *
643  * The currency is simply time in microseconds and it increases as the clock
644  * ticks. The accumulated microseconds (us) are converted to bytes and
645  * returned.
646  */
647 static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
648 					      u64 *max_bytes,
649 					      u64 *max_vis_bytes)
650 {
651 	s64 time_us, increment_us;
652 	u64 free_vram, total_vram, used_vram;
653 	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
654 	 * throttling.
655 	 *
656 	 * It means that in order to get full max MBps, at least 5 IBs per
657 	 * second must be submitted and not more than 200ms apart from each
658 	 * other.
659 	 */
660 	const s64 us_upper_bound = 200000;
661 
662 	if (!adev->mm_stats.log2_max_MBps) {
663 		*max_bytes = 0;
664 		*max_vis_bytes = 0;
665 		return;
666 	}
667 
668 	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
669 	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
670 	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
671 
672 	spin_lock(&adev->mm_stats.lock);
673 
674 	/* Increase the amount of accumulated us. */
675 	time_us = ktime_to_us(ktime_get());
676 	increment_us = time_us - adev->mm_stats.last_update_us;
677 	adev->mm_stats.last_update_us = time_us;
678 	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
679 				      us_upper_bound);
680 
681 	/* This prevents the short period of low performance when the VRAM
682 	 * usage is low and the driver is in debt or doesn't have enough
683 	 * accumulated us to fill VRAM quickly.
684 	 *
685 	 * The situation can occur in these cases:
686 	 * - a lot of VRAM is freed by userspace
687 	 * - the presence of a big buffer causes a lot of evictions
688 	 *   (solution: split buffers into smaller ones)
689 	 *
690 	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
691 	 * accum_us to a positive number.
692 	 */
693 	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
694 		s64 min_us;
695 
696 		/* Be more aggressive on dGPUs. Try to fill a portion of free
697 		 * VRAM now.
698 		 */
699 		if (!(adev->flags & AMD_IS_APU))
700 			min_us = bytes_to_us(adev, free_vram / 4);
701 		else
702 			min_us = 0; /* Reset accum_us on APUs. */
703 
704 		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
705 	}
706 
707 	/* This is set to 0 if the driver is in debt to disallow (optional)
708 	 * buffer moves.
709 	 */
710 	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
711 
712 	/* Do the same for visible VRAM if half of it is free */
713 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
714 		u64 total_vis_vram = adev->gmc.visible_vram_size;
715 		u64 used_vis_vram =
716 		  amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
717 
718 		if (used_vis_vram < total_vis_vram) {
719 			u64 free_vis_vram = total_vis_vram - used_vis_vram;
720 			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
721 							  increment_us, us_upper_bound);
722 
723 			if (free_vis_vram >= total_vis_vram / 2)
724 				adev->mm_stats.accum_us_vis =
725 					max(bytes_to_us(adev, free_vis_vram / 2),
726 					    adev->mm_stats.accum_us_vis);
727 		}
728 
729 		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
730 	} else {
731 		*max_vis_bytes = 0;
732 	}
733 
734 	spin_unlock(&adev->mm_stats.lock);
735 }
736 
737 /* Report how many bytes have really been moved for the last command
738  * submission. This can result in a debt that can stop buffer migrations
739  * temporarily.
740  */
741 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
742 				  u64 num_vis_bytes)
743 {
744 	spin_lock(&adev->mm_stats.lock);
745 	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
746 	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
747 	spin_unlock(&adev->mm_stats.lock);
748 }
749 
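/*
 * Per-BO validation callback: pinned BOs are left alone. Otherwise a
 * placement domain is chosen based on how much of the per-CS move budget is
 * left (preferred domains while budget remains, allowed domains once it is
 * depleted), the BO is validated through TTM, the bytes actually moved are
 * accounted, and validation falls back to the allowed domains once if it
 * runs out of memory.
 */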
750 static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
751 {
752 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
753 	struct amdgpu_cs_parser *p = param;
754 	struct ttm_operation_ctx ctx = {
755 		.interruptible = true,
756 		.no_wait_gpu = false,
757 		.resv = bo->tbo.base.resv
758 	};
759 	uint32_t domain;
760 	int r;
761 
762 	if (bo->tbo.pin_count)
763 		return 0;
764 
765 	/* Don't move this buffer if we have depleted our allowance
766 	 * to move it. Don't move anything if the threshold is zero.
767 	 */
768 	if (p->bytes_moved < p->bytes_moved_threshold &&
769 	    (!bo->tbo.base.dma_buf ||
770 	    list_empty(&bo->tbo.base.dma_buf->attachments))) {
771 		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
772 		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
773 			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
774 			 * visible VRAM if we've depleted our allowance to do
775 			 * that.
776 			 */
777 			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
778 				domain = bo->preferred_domains;
779 			else
780 				domain = bo->allowed_domains;
781 		} else {
782 			domain = bo->preferred_domains;
783 		}
784 	} else {
785 		domain = bo->allowed_domains;
786 	}
787 
788 retry:
789 	amdgpu_bo_placement_from_domain(bo, domain);
790 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
791 
792 	p->bytes_moved += ctx.bytes_moved;
793 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
794 	    amdgpu_bo_in_cpu_visible_vram(bo))
795 		p->bytes_moved_vis += ctx.bytes_moved;
796 
797 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
798 		domain = bo->allowed_domains;
799 		goto retry;
800 	}
801 
802 	return r;
803 }
804 
805 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
806 			    struct list_head *validated)
807 {
808 	struct ttm_operation_ctx ctx = { true, false };
809 	struct amdgpu_bo_list_entry *lobj;
810 	int r;
811 
812 	list_for_each_entry(lobj, validated, tv.head) {
813 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
814 		struct mm_struct *usermm;
815 
816 #ifdef notyet
817 		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
818 		if (usermm && usermm != current->mm)
819 			return -EPERM;
820 #endif
821 
822 		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
823 		    lobj->user_invalidated && lobj->user_pages) {
824 			amdgpu_bo_placement_from_domain(bo,
825 							AMDGPU_GEM_DOMAIN_CPU);
826 			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
827 			if (r)
828 				return r;
829 
830 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
831 						     lobj->user_pages);
832 		}
833 
834 		r = amdgpu_cs_bo_validate(p, bo);
835 		if (r)
836 			return r;
837 
838 		kvfree(lobj->user_pages);
839 		lobj->user_pages = NULL;
840 	}
841 	return 0;
842 }
843 
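/*
 * Gather and reserve every buffer this CS touches: take the BO list from the
 * bo_list_handle or the BO_HANDLES chunk (or create an empty one), reserve
 * all entries together with the VM page directory via ttm_eu, fetch the
 * userptr backing pages and note whether they were invalidated, then validate
 * everything against the per-CS move budget and set up the GART mapping for
 * the user fence BO.
 */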
844 static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
845 				union drm_amdgpu_cs *cs)
846 {
847 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
848 	struct amdgpu_vm *vm = &fpriv->vm;
849 	struct amdgpu_bo_list_entry *e;
850 	struct list_head duplicates;
851 	unsigned int i;
852 	int r;
853 
854 	INIT_LIST_HEAD(&p->validated);
855 
856 	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
857 	if (cs->in.bo_list_handle) {
858 		if (p->bo_list)
859 			return -EINVAL;
860 
861 		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
862 				       &p->bo_list);
863 		if (r)
864 			return r;
865 	} else if (!p->bo_list) {
866 		/* Create an empty bo_list when no handle is provided */
867 		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
868 					  &p->bo_list);
869 		if (r)
870 			return r;
871 	}
872 
873 	mutex_lock(&p->bo_list->bo_list_mutex);
874 
875 	/* One for TTM and one for each CS job */
876 	amdgpu_bo_list_for_each_entry(e, p->bo_list)
877 		e->tv.num_shared = 1 + p->gang_size;
878 	p->uf_entry.tv.num_shared = 1 + p->gang_size;
879 
880 	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
881 
882 	INIT_LIST_HEAD(&duplicates);
883 	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
884 
885 	/* Two for VM updates, one for TTM and one for each CS job */
886 	p->vm_pd.tv.num_shared = 3 + p->gang_size;
887 
888 	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
889 		list_add(&p->uf_entry.tv.head, &p->validated);
890 
891 	/* Get the userptr backing pages. If the pages were updated after they
892 	 * were registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
893 	 * will call amdgpu_ttm_backend_bind() to flush and invalidate the new pages.
894 	 */
895 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
896 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
897 		bool userpage_invalidated = false;
898 		int i;
899 
900 		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
901 					sizeof(struct vm_page *),
902 					GFP_KERNEL | __GFP_ZERO);
903 		if (!e->user_pages) {
904 			DRM_ERROR("kvmalloc_array failure\n");
905 			r = -ENOMEM;
906 			goto out_free_user_pages;
907 		}
908 
909 		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
910 		if (r) {
911 			kvfree(e->user_pages);
912 			e->user_pages = NULL;
913 			goto out_free_user_pages;
914 		}
915 
916 		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
917 			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
918 				userpage_invalidated = true;
919 				break;
920 			}
921 		}
922 		e->user_invalidated = userpage_invalidated;
923 	}
924 
925 	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
926 				   &duplicates);
927 	if (unlikely(r != 0)) {
928 		if (r != -ERESTARTSYS)
929 			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
930 		goto out_free_user_pages;
931 	}
932 
933 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
934 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
935 
936 		e->bo_va = amdgpu_vm_bo_find(vm, bo);
937 	}
938 
939 	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
940 					  &p->bytes_moved_vis_threshold);
941 	p->bytes_moved = 0;
942 	p->bytes_moved_vis = 0;
943 
944 	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
945 				      amdgpu_cs_bo_validate, p);
946 	if (r) {
947 		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
948 		goto error_validate;
949 	}
950 
951 	r = amdgpu_cs_list_validate(p, &duplicates);
952 	if (r)
953 		goto error_validate;
954 
955 	r = amdgpu_cs_list_validate(p, &p->validated);
956 	if (r)
957 		goto error_validate;
958 
959 	if (p->uf_entry.tv.bo) {
960 		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
961 
962 		r = amdgpu_ttm_alloc_gart(&uf->tbo);
963 		if (r)
964 			goto error_validate;
965 
966 		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
967 	}
968 
969 	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
970 				     p->bytes_moved_vis);
971 
972 	for (i = 0; i < p->gang_size; ++i)
973 		amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
974 					 p->bo_list->gws_obj,
975 					 p->bo_list->oa_obj);
976 	return 0;
977 
978 error_validate:
979 	ttm_eu_backoff_reservation(&p->ticket, &p->validated);
980 
981 out_free_user_pages:
982 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
983 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
984 
985 		if (!e->user_pages)
986 			continue;
987 		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
988 		kvfree(e->user_pages);
989 		e->user_pages = NULL;
990 		e->range = NULL;
991 	}
992 	mutex_unlock(&p->bo_list->bo_list_mutex);
993 	return r;
994 }
995 
996 static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
997 {
998 	int i, j;
999 
1000 	if (!trace_amdgpu_cs_enabled())
1001 		return;
1002 
1003 	for (i = 0; i < p->gang_size; ++i) {
1004 		struct amdgpu_job *job = p->jobs[i];
1005 
1006 		for (j = 0; j < job->num_ibs; ++j)
1007 			trace_amdgpu_cs(p, job, &job->ibs[j]);
1008 	}
1009 }
1010 
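/*
 * Only rings that emulate VM accesses in software (UVD/VCE) need this: map
 * the IB's backing BO through the CPU and either copy the commands into the
 * kernel IB for parsing or patch them in place. All other rings are left
 * untouched.
 */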
1011 static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
1012 			       struct amdgpu_job *job)
1013 {
1014 	struct amdgpu_ring *ring = amdgpu_job_ring(job);
1015 	unsigned int i;
1016 	int r;
1017 
1018 	/* Only for UVD/VCE VM emulation */
1019 	if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
1020 		return 0;
1021 
1022 	for (i = 0; i < job->num_ibs; ++i) {
1023 		struct amdgpu_ib *ib = &job->ibs[i];
1024 		struct amdgpu_bo_va_mapping *m;
1025 		struct amdgpu_bo *aobj;
1026 		uint64_t va_start;
1027 		uint8_t *kptr;
1028 
1029 		va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
1030 		r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
1031 		if (r) {
1032 			DRM_ERROR("IB va_start is invalid\n");
1033 			return r;
1034 		}
1035 
1036 		if ((va_start + ib->length_dw * 4) >
1037 		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
1038 			DRM_ERROR("IB va_start+ib_bytes is invalid\n");
1039 			return -EINVAL;
1040 		}
1041 
1042 		/* the IB should be reserved at this point */
1043 		r = amdgpu_bo_kmap(aobj, (void **)&kptr);
1044 		if (r) {
1045 			return r;
1046 		}
1047 
1048 		kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
1049 
1050 		if (ring->funcs->parse_cs) {
1051 			memcpy(ib->ptr, kptr, ib->length_dw * 4);
1052 			amdgpu_bo_kunmap(aobj);
1053 
1054 			r = amdgpu_ring_parse_cs(ring, p, job, ib);
1055 			if (r)
1056 				return r;
1057 		} else {
1058 			ib->ptr = (uint32_t *)kptr;
1059 			r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
1060 			amdgpu_bo_kunmap(aobj);
1061 			if (r)
1062 				return r;
1063 		}
1064 	}
1065 
1066 	return 0;
1067 }
1068 
1069 static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
1070 {
1071 	unsigned int i;
1072 	int r;
1073 
1074 	for (i = 0; i < p->gang_size; ++i) {
1075 		r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
1076 		if (r)
1077 			return r;
1078 	}
1079 	return 0;
1080 }
1081 
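/*
 * Bring the page tables up to date for this submission: clear freed mappings,
 * update the PRT and CSA mappings and every per-BO VA, handle moved BOs,
 * update the page directories and add all resulting page-table update fences
 * to the gang leader's sync object so the jobs wait for them.
 */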
1082 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
1083 {
1084 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1085 	struct amdgpu_job *job = p->gang_leader;
1086 	struct amdgpu_device *adev = p->adev;
1087 	struct amdgpu_vm *vm = &fpriv->vm;
1088 	struct amdgpu_bo_list_entry *e;
1089 	struct amdgpu_bo_va *bo_va;
1090 	struct amdgpu_bo *bo;
1091 	unsigned int i;
1092 	int r;
1093 
1094 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
1095 	if (r)
1096 		return r;
1097 
1098 	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
1099 	if (r)
1100 		return r;
1101 
1102 	r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update);
1103 	if (r)
1104 		return r;
1105 
1106 	if (fpriv->csa_va) {
1107 		bo_va = fpriv->csa_va;
1108 		BUG_ON(!bo_va);
1109 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1110 		if (r)
1111 			return r;
1112 
1113 		r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
1114 		if (r)
1115 			return r;
1116 	}
1117 
1118 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1119 		/* ignore duplicates */
1120 		bo = ttm_to_amdgpu_bo(e->tv.bo);
1121 		if (!bo)
1122 			continue;
1123 
1124 		bo_va = e->bo_va;
1125 		if (bo_va == NULL)
1126 			continue;
1127 
1128 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1129 		if (r)
1130 			return r;
1131 
1132 		r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
1133 		if (r)
1134 			return r;
1135 	}
1136 
1137 	r = amdgpu_vm_handle_moved(adev, vm);
1138 	if (r)
1139 		return r;
1140 
1141 	r = amdgpu_vm_update_pdes(adev, vm, false);
1142 	if (r)
1143 		return r;
1144 
1145 	r = amdgpu_sync_fence(&job->sync, vm->last_update);
1146 	if (r)
1147 		return r;
1148 
1149 	for (i = 0; i < p->gang_size; ++i) {
1150 		job = p->jobs[i];
1151 
1152 		if (!job->vm)
1153 			continue;
1154 
1155 		job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
1156 	}
1157 
1158 	if (amdgpu_vm_debug) {
1159 		/* Invalidate all BOs to test for userspace bugs */
1160 		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1161 			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1162 
1163 			/* ignore duplicates */
1164 			if (!bo)
1165 				continue;
1166 
1167 			amdgpu_vm_bo_invalidate(adev, bo, false);
1168 		}
1169 	}
1170 
1171 	return 0;
1172 }
1173 
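/*
 * Make the gang leader wait for the reservation-object fences of every
 * validated BO (explicitly synced BOs only wait for explicit fences), copy
 * the resulting sync state to the other gang members and finally wait for
 * the previous fence of the leader's entity.
 */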
1174 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
1175 {
1176 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1177 	struct amdgpu_job *leader = p->gang_leader;
1178 	struct amdgpu_bo_list_entry *e;
1179 	unsigned int i;
1180 	int r;
1181 
1182 	list_for_each_entry(e, &p->validated, tv.head) {
1183 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1184 		struct dma_resv *resv = bo->tbo.base.resv;
1185 		enum amdgpu_sync_mode sync_mode;
1186 
1187 		sync_mode = amdgpu_bo_explicit_sync(bo) ?
1188 			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
1189 		r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode,
1190 				     &fpriv->vm);
1191 		if (r)
1192 			return r;
1193 	}
1194 
1195 	for (i = 0; i < p->gang_size; ++i) {
1196 		if (p->jobs[i] == leader)
1197 			continue;
1198 
1199 		r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
1200 		if (r)
1201 			return r;
1202 	}
1203 
1204 	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
1205 	if (r && r != -ERESTARTSYS)
1206 		DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
1207 	return r;
1208 }
1209 
1210 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1211 {
1212 	int i;
1213 
1214 	for (i = 0; i < p->num_post_deps; ++i) {
1215 		if (p->post_deps[i].chain && p->post_deps[i].point) {
1216 			drm_syncobj_add_point(p->post_deps[i].syncobj,
1217 					      p->post_deps[i].chain,
1218 					      p->fence, p->post_deps[i].point);
1219 			p->post_deps[i].chain = NULL;
1220 		} else {
1221 			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1222 						  p->fence);
1223 		}
1224 	}
1225 }
1226 
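/*
 * Final submission step, called with all buffers reserved: arm the scheduler
 * jobs, make the gang leader depend on the scheduled fences of the other
 * members, re-check the userptr pages under the notifier lock (returning
 * -EAGAIN so userspace restarts the ioctl if they changed), attach the
 * finished fence to every BO, record the sequence number used for the user
 * fence and push each job to its scheduler entity.
 */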
1227 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1228 			    union drm_amdgpu_cs *cs)
1229 {
1230 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1231 	struct amdgpu_job *leader = p->gang_leader;
1232 	struct amdgpu_bo_list_entry *e;
1233 	unsigned int i;
1234 	uint64_t seq;
1235 	int r;
1236 
1237 	for (i = 0; i < p->gang_size; ++i)
1238 		drm_sched_job_arm(&p->jobs[i]->base);
1239 
1240 	for (i = 0; i < p->gang_size; ++i) {
1241 		struct dma_fence *fence;
1242 
1243 		if (p->jobs[i] == leader)
1244 			continue;
1245 
1246 		fence = &p->jobs[i]->base.s_fence->scheduled;
1247 		r = amdgpu_sync_fence(&leader->sync, fence);
1248 		if (r)
1249 			goto error_cleanup;
1250 	}
1251 
1252 	if (p->gang_size > 1) {
1253 		for (i = 0; i < p->gang_size; ++i)
1254 			amdgpu_job_set_gang_leader(p->jobs[i], leader);
1255 	}
1256 
1257 	/* No memory allocation is allowed while holding the notifier lock.
1258 	 * The lock is held until amdgpu_cs_submit() is finished and the fence
1259 	 * is added to the BOs.
1260 	 */
1261 	mutex_lock(&p->adev->notifier_lock);
1262 
1263 	/* If the userptrs were invalidated after amdgpu_cs_parser_bos(), return
1264 	 * -EAGAIN so that drmIoctl() in libdrm restarts the amdgpu_cs_ioctl.
1265 	 */
1266 	r = 0;
1267 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1268 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1269 
1270 		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
1271 		e->range = NULL;
1272 	}
1273 	if (r) {
1274 		r = -EAGAIN;
1275 		goto error_unlock;
1276 	}
1277 
1278 	p->fence = dma_fence_get(&leader->base.s_fence->finished);
1279 	list_for_each_entry(e, &p->validated, tv.head) {
1280 
1281 		/* Everybody except for the gang leader uses READ */
1282 		for (i = 0; i < p->gang_size; ++i) {
1283 			if (p->jobs[i] == leader)
1284 				continue;
1285 
1286 			dma_resv_add_fence(e->tv.bo->base.resv,
1287 					   &p->jobs[i]->base.s_fence->finished,
1288 					   DMA_RESV_USAGE_READ);
1289 		}
1290 
1291 		/* The gang leader is remembered as the writer */
1292 		e->tv.num_shared = 0;
1293 	}
1294 
1295 	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
1296 				   p->fence);
1297 	amdgpu_cs_post_dependencies(p);
1298 
1299 	if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1300 	    !p->ctx->preamble_presented) {
1301 		leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1302 		p->ctx->preamble_presented = true;
1303 	}
1304 
1305 	cs->out.handle = seq;
1306 	leader->uf_sequence = seq;
1307 
1308 	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
1309 	for (i = 0; i < p->gang_size; ++i) {
1310 		amdgpu_job_free_resources(p->jobs[i]);
1311 		trace_amdgpu_cs_ioctl(p->jobs[i]);
1312 		drm_sched_entity_push_job(&p->jobs[i]->base);
1313 		p->jobs[i] = NULL;
1314 	}
1315 
1316 	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1317 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
1318 
1319 	mutex_unlock(&p->adev->notifier_lock);
1320 	mutex_unlock(&p->bo_list->bo_list_mutex);
1321 	return 0;
1322 
1323 error_unlock:
1324 	mutex_unlock(&p->adev->notifier_lock);
1325 
1326 error_cleanup:
1327 	for (i = 0; i < p->gang_size; ++i)
1328 		drm_sched_job_cleanup(&p->jobs[i]->base);
1329 	return r;
1330 }
1331 
1332 /* Cleanup the parser structure */
1333 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
1334 {
1335 	unsigned i;
1336 
1337 	for (i = 0; i < parser->num_post_deps; i++) {
1338 		drm_syncobj_put(parser->post_deps[i].syncobj);
1339 		kfree(parser->post_deps[i].chain);
1340 	}
1341 	kfree(parser->post_deps);
1342 
1343 	dma_fence_put(parser->fence);
1344 
1345 	if (parser->ctx)
1346 		amdgpu_ctx_put(parser->ctx);
1347 	if (parser->bo_list)
1348 		amdgpu_bo_list_put(parser->bo_list);
1349 
1350 	for (i = 0; i < parser->nchunks; i++)
1351 		kvfree(parser->chunks[i].kdata);
1352 	kvfree(parser->chunks);
1353 	for (i = 0; i < parser->gang_size; ++i) {
1354 		if (parser->jobs[i])
1355 			amdgpu_job_free(parser->jobs[i]);
1356 	}
1357 	if (parser->uf_entry.tv.bo) {
1358 		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
1359 
1360 		amdgpu_bo_unref(&uf);
1361 	}
1362 }
1363 
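/*
 * Top-level CS ioctl: initialize the parser, run pass 1 (copy the chunks from
 * userspace and allocate the jobs) and pass 2 (IBs, dependencies and
 * syncobjs), reserve and validate the BOs, patch IBs where needed, update the
 * VM, sync the rings and finally submit the gang to the scheduler.
 */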
1364 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1365 {
1366 	struct amdgpu_device *adev = drm_to_adev(dev);
1367 	struct amdgpu_cs_parser parser;
1368 	int r;
1369 
1370 	if (amdgpu_ras_intr_triggered())
1371 		return -EHWPOISON;
1372 
1373 	if (!adev->accel_working)
1374 		return -EBUSY;
1375 
1376 	r = amdgpu_cs_parser_init(&parser, adev, filp, data);
1377 	if (r) {
1378 		if (printk_ratelimit())
1379 			DRM_ERROR("Failed to initialize parser %d!\n", r);
1380 		return r;
1381 	}
1382 
1383 	r = amdgpu_cs_pass1(&parser, data);
1384 	if (r)
1385 		goto error_fini;
1386 
1387 	r = amdgpu_cs_pass2(&parser);
1388 	if (r)
1389 		goto error_fini;
1390 
1391 	r = amdgpu_cs_parser_bos(&parser, data);
1392 	if (r) {
1393 		if (r == -ENOMEM)
1394 			DRM_ERROR("Not enough memory for command submission!\n");
1395 		else if (r != -ERESTARTSYS && r != -EAGAIN)
1396 			DRM_ERROR("Failed to process the buffer list %d!\n", r);
1397 		goto error_fini;
1398 	}
1399 
1400 	r = amdgpu_cs_patch_jobs(&parser);
1401 	if (r)
1402 		goto error_backoff;
1403 
1404 	r = amdgpu_cs_vm_handling(&parser);
1405 	if (r)
1406 		goto error_backoff;
1407 
1408 	r = amdgpu_cs_sync_rings(&parser);
1409 	if (r)
1410 		goto error_backoff;
1411 
1412 	trace_amdgpu_cs_ibs(&parser);
1413 
1414 	r = amdgpu_cs_submit(&parser, data);
1415 	if (r)
1416 		goto error_backoff;
1417 
1418 	amdgpu_cs_parser_fini(&parser);
1419 	return 0;
1420 
1421 error_backoff:
1422 	ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
1423 	mutex_unlock(&parser.bo_list->bo_list_mutex);
1424 
1425 error_fini:
1426 	amdgpu_cs_parser_fini(&parser);
1427 	return r;
1428 }
1429 
1430 /**
1431  * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1432  *
1433  * @dev: drm device
1434  * @data: data from userspace
1435  * @filp: file private
1436  *
1437  * Wait for the command submission identified by handle to finish.
1438  */
1439 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1440 			 struct drm_file *filp)
1441 {
1442 	union drm_amdgpu_wait_cs *wait = data;
1443 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1444 	struct drm_sched_entity *entity;
1445 	struct amdgpu_ctx *ctx;
1446 	struct dma_fence *fence;
1447 	long r;
1448 
1449 	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1450 	if (ctx == NULL)
1451 		return -EINVAL;
1452 
1453 	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1454 				  wait->in.ring, &entity);
1455 	if (r) {
1456 		amdgpu_ctx_put(ctx);
1457 		return r;
1458 	}
1459 
1460 	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1461 	if (IS_ERR(fence))
1462 		r = PTR_ERR(fence);
1463 	else if (fence) {
1464 		r = dma_fence_wait_timeout(fence, true, timeout);
1465 		if (r > 0 && fence->error)
1466 			r = fence->error;
1467 		dma_fence_put(fence);
1468 	} else
1469 		r = 1;
1470 
1471 	amdgpu_ctx_put(ctx);
1472 	if (r < 0)
1473 		return r;
1474 
1475 	memset(wait, 0, sizeof(*wait));
1476 	wait->out.status = (r == 0);
1477 
1478 	return 0;
1479 }
1480 
1481 /**
1482  * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1483  *
1484  * @adev: amdgpu device
1485  * @filp: file private
1486  * @user: drm_amdgpu_fence copied from user space
1487  */
1488 static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1489 					     struct drm_file *filp,
1490 					     struct drm_amdgpu_fence *user)
1491 {
1492 	struct drm_sched_entity *entity;
1493 	struct amdgpu_ctx *ctx;
1494 	struct dma_fence *fence;
1495 	int r;
1496 
1497 	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1498 	if (ctx == NULL)
1499 		return ERR_PTR(-EINVAL);
1500 
1501 	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1502 				  user->ring, &entity);
1503 	if (r) {
1504 		amdgpu_ctx_put(ctx);
1505 		return ERR_PTR(r);
1506 	}
1507 
1508 	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1509 	amdgpu_ctx_put(ctx);
1510 
1511 	return fence;
1512 }
1513 
1514 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1515 				    struct drm_file *filp)
1516 {
1517 	struct amdgpu_device *adev = drm_to_adev(dev);
1518 	union drm_amdgpu_fence_to_handle *info = data;
1519 	struct dma_fence *fence;
1520 	struct drm_syncobj *syncobj;
1521 	struct sync_file *sync_file;
1522 	int fd, r;
1523 
1524 	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1525 	if (IS_ERR(fence))
1526 		return PTR_ERR(fence);
1527 
1528 	if (!fence)
1529 		fence = dma_fence_get_stub();
1530 
1531 	switch (info->in.what) {
1532 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1533 		r = drm_syncobj_create(&syncobj, 0, fence);
1534 		dma_fence_put(fence);
1535 		if (r)
1536 			return r;
1537 		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1538 		drm_syncobj_put(syncobj);
1539 		return r;
1540 
1541 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1542 		r = drm_syncobj_create(&syncobj, 0, fence);
1543 		dma_fence_put(fence);
1544 		if (r)
1545 			return r;
1546 		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1547 		drm_syncobj_put(syncobj);
1548 		return r;
1549 
1550 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1551 		fd = get_unused_fd_flags(O_CLOEXEC);
1552 		if (fd < 0) {
1553 			dma_fence_put(fence);
1554 			return fd;
1555 		}
1556 
1557 		sync_file = sync_file_create(fence);
1558 		dma_fence_put(fence);
1559 		if (!sync_file) {
1560 			put_unused_fd(fd);
1561 			return -ENOMEM;
1562 		}
1563 
1564 		fd_install(fd, sync_file->file);
1565 		info->out.handle = fd;
1566 		return 0;
1567 
1568 	default:
1569 		dma_fence_put(fence);
1570 		return -EINVAL;
1571 	}
1572 }
1573 
1574 /**
1575  * amdgpu_cs_wait_all_fences - wait on all fences to signal
1576  *
1577  * @adev: amdgpu device
1578  * @filp: file private
1579  * @wait: wait parameters
1580  * @fences: array of drm_amdgpu_fence
1581  */
1582 static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1583 				     struct drm_file *filp,
1584 				     union drm_amdgpu_wait_fences *wait,
1585 				     struct drm_amdgpu_fence *fences)
1586 {
1587 	uint32_t fence_count = wait->in.fence_count;
1588 	unsigned int i;
1589 	long r = 1;
1590 
1591 	for (i = 0; i < fence_count; i++) {
1592 		struct dma_fence *fence;
1593 		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1594 
1595 		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1596 		if (IS_ERR(fence))
1597 			return PTR_ERR(fence);
1598 		else if (!fence)
1599 			continue;
1600 
1601 		r = dma_fence_wait_timeout(fence, true, timeout);
1602 		if (r > 0 && fence->error)
1603 			r = fence->error;
1604 
1605 		dma_fence_put(fence);
1606 		if (r < 0)
1607 			return r;
1608 
1609 		if (r == 0)
1610 			break;
1611 	}
1612 
1613 	memset(wait, 0, sizeof(*wait));
1614 	wait->out.status = (r > 0);
1615 
1616 	return 0;
1617 }
1618 
1619 /**
1620  * amdgpu_cs_wait_any_fence - wait on any fence to signal
1621  *
1622  * @adev: amdgpu device
1623  * @filp: file private
1624  * @wait: wait parameters
1625  * @fences: array of drm_amdgpu_fence
1626  */
1627 static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1628 				    struct drm_file *filp,
1629 				    union drm_amdgpu_wait_fences *wait,
1630 				    struct drm_amdgpu_fence *fences)
1631 {
1632 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1633 	uint32_t fence_count = wait->in.fence_count;
1634 	uint32_t first = ~0;
1635 	struct dma_fence **array;
1636 	unsigned int i;
1637 	long r;
1638 
1639 	/* Prepare the fence array */
1640 	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1641 
1642 	if (array == NULL)
1643 		return -ENOMEM;
1644 
1645 	for (i = 0; i < fence_count; i++) {
1646 		struct dma_fence *fence;
1647 
1648 		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1649 		if (IS_ERR(fence)) {
1650 			r = PTR_ERR(fence);
1651 			goto err_free_fence_array;
1652 		} else if (fence) {
1653 			array[i] = fence;
1654 		} else { /* NULL, the fence has already been signaled */
1655 			r = 1;
1656 			first = i;
1657 			goto out;
1658 		}
1659 	}
1660 
1661 	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1662 				       &first);
1663 	if (r < 0)
1664 		goto err_free_fence_array;
1665 
1666 out:
1667 	memset(wait, 0, sizeof(*wait));
1668 	wait->out.status = (r > 0);
1669 	wait->out.first_signaled = first;
1670 
1671 	if (first < fence_count && array[first])
1672 		r = array[first]->error;
1673 	else
1674 		r = 0;
1675 
1676 err_free_fence_array:
1677 	for (i = 0; i < fence_count; i++)
1678 		dma_fence_put(array[i]);
1679 	kfree(array);
1680 
1681 	return r;
1682 }
1683 
1684 /**
1685  * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1686  *
1687  * @dev: drm device
1688  * @data: data from userspace
1689  * @filp: file private
1690  */
1691 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1692 				struct drm_file *filp)
1693 {
1694 	struct amdgpu_device *adev = drm_to_adev(dev);
1695 	union drm_amdgpu_wait_fences *wait = data;
1696 	uint32_t fence_count = wait->in.fence_count;
1697 	struct drm_amdgpu_fence *fences_user;
1698 	struct drm_amdgpu_fence *fences;
1699 	int r;
1700 
1701 	/* Get the fences from userspace */
1702 	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1703 			GFP_KERNEL);
1704 	if (fences == NULL)
1705 		return -ENOMEM;
1706 
1707 	fences_user = u64_to_user_ptr(wait->in.fences);
1708 	if (copy_from_user(fences, fences_user,
1709 		sizeof(struct drm_amdgpu_fence) * fence_count)) {
1710 		r = -EFAULT;
1711 		goto err_free_fences;
1712 	}
1713 
1714 	if (wait->in.wait_all)
1715 		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1716 	else
1717 		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1718 
1719 err_free_fences:
1720 	kfree(fences);
1721 
1722 	return r;
1723 }
1724 
1725 /**
1726  * amdgpu_cs_find_mapping - find bo_va for VM address
1727  *
1728  * @parser: command submission parser context
1729  * @addr: VM address
1730  * @bo: resulting BO of the mapping found
1731  * @map: Placeholder to return found BO mapping
1732  *
1733  * Search the buffer objects in the command submission context for a certain
1734  * virtual memory address. Returns 0 and fills in @bo and @map when the
1735  * mapping is found, a negative error code otherwise.
1736  */
1737 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1738 			   uint64_t addr, struct amdgpu_bo **bo,
1739 			   struct amdgpu_bo_va_mapping **map)
1740 {
1741 	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1742 	struct ttm_operation_ctx ctx = { false, false };
1743 	struct amdgpu_vm *vm = &fpriv->vm;
1744 	struct amdgpu_bo_va_mapping *mapping;
1745 	int r;
1746 
1747 	addr /= AMDGPU_GPU_PAGE_SIZE;
1748 
1749 	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1750 	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1751 		return -EINVAL;
1752 
1753 	*bo = mapping->bo_va->base.bo;
1754 	*map = mapping;
1755 
1756 	/* Double check that the BO is reserved by this CS */
1757 	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
1758 		return -EINVAL;
1759 
1760 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1761 		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1762 		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1763 		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1764 		if (r)
1765 			return r;
1766 	}
1767 
1768 	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1769 }
1770