1 /*	$NetBSD: kfd_chardev.c,v 1.3 2021/12/18 23:44:59 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2014 Advanced Micro Devices, Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  */
24 
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: kfd_chardev.c,v 1.3 2021/12/18 23:44:59 riastradh Exp $");
27 
28 #include <linux/device.h>
29 #include <linux/export.h>
30 #include <linux/err.h>
31 #include <linux/fs.h>
32 #include <linux/file.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/uaccess.h>
36 #include <linux/compat.h>
37 #include <uapi/linux/kfd_ioctl.h>
38 #include <linux/time.h>
39 #include <linux/mm.h>
40 #include <linux/mman.h>
41 #include <linux/dma-buf.h>
42 #include <asm/processor.h>
43 #include "kfd_priv.h"
44 #include "kfd_device_queue_manager.h"
45 #include "kfd_dbgmgr.h"
46 #include "amdgpu_amdkfd.h"
47 
48 static long kfd_ioctl(struct file *, unsigned int, unsigned long);
49 static int kfd_open(struct inode *, struct file *);
50 static int kfd_release(struct inode *, struct file *);
51 static int kfd_mmap(struct file *, struct vm_area_struct *);
52 
53 static const char kfd_dev_name[] = "kfd";
54 
55 static const struct file_operations kfd_fops = {
56 	.owner = THIS_MODULE,
57 	.unlocked_ioctl = kfd_ioctl,
58 	.compat_ioctl = compat_ptr_ioctl,
59 	.open = kfd_open,
60 	.release = kfd_release,
61 	.mmap = kfd_mmap,
62 };
63 
64 static int kfd_char_dev_major = -1;
65 static struct class *kfd_class;
66 struct device *kfd_device;
67 
68 int kfd_chardev_init(void)
69 {
70 	int err = 0;
71 
72 	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
73 	err = kfd_char_dev_major;
74 	if (err < 0)
75 		goto err_register_chrdev;
76 
77 	kfd_class = class_create(THIS_MODULE, kfd_dev_name);
78 	err = PTR_ERR(kfd_class);
79 	if (IS_ERR(kfd_class))
80 		goto err_class_create;
81 
82 	kfd_device = device_create(kfd_class, NULL,
83 					MKDEV(kfd_char_dev_major, 0),
84 					NULL, kfd_dev_name);
85 	err = PTR_ERR(kfd_device);
86 	if (IS_ERR(kfd_device))
87 		goto err_device_create;
88 
89 	return 0;
90 
91 err_device_create:
92 	class_destroy(kfd_class);
93 err_class_create:
94 	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
95 err_register_chrdev:
96 	return err;
97 }
98 
99 void kfd_chardev_exit(void)
100 {
101 	device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
102 	class_destroy(kfd_class);
103 	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
104 }
105 
106 struct device *kfd_chardev(void)
107 {
108 	return kfd_device;
109 }
110 
111 
112 static int kfd_open(struct inode *inode, struct file *filep)
113 {
114 	struct kfd_process *process;
115 	bool is_32bit_user_mode;
116 
117 	if (iminor(inode) != 0)
118 		return -ENODEV;
119 
120 	is_32bit_user_mode = in_compat_syscall();
121 
122 	if (is_32bit_user_mode) {
123 		dev_warn(kfd_device,
124 			"Process %d (32-bit) failed to open /dev/kfd\n"
125 			"32-bit processes are not supported by amdkfd\n",
126 			current->pid);
127 		return -EPERM;
128 	}
129 
130 	process = kfd_create_process(filep);
131 	if (IS_ERR(process))
132 		return PTR_ERR(process);
133 
134 	if (kfd_is_locked()) {
135 		kfd_unref_process(process);
136 		return -EAGAIN;
137 	}
138 
139 	/* filep now owns the reference returned by kfd_create_process */
140 	filep->private_data = process;
141 
142 	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
143 		process->pasid, process->is_32bit_user_mode);
144 
145 	return 0;
146 }
147 
148 static int kfd_release(struct inode *inode, struct file *filep)
149 {
150 	struct kfd_process *process = filep->private_data;
151 
152 	if (process)
153 		kfd_unref_process(process);
154 
155 	return 0;
156 }
157 
158 static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
159 					void *data)
160 {
161 	struct kfd_ioctl_get_version_args *args = data;
162 
163 	args->major_version = KFD_IOCTL_MAJOR_VERSION;
164 	args->minor_version = KFD_IOCTL_MINOR_VERSION;
165 
166 	return 0;
167 }
168 
169 static int set_queue_properties_from_user(struct queue_properties *q_properties,
170 				struct kfd_ioctl_create_queue_args *args)
171 {
172 	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
173 		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
174 		return -EINVAL;
175 	}
176 
177 	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
178 		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
179 		return -EINVAL;
180 	}
181 
182 	if ((args->ring_base_address) &&
183 		(!access_ok((const void __user *) args->ring_base_address,
184 			sizeof(uint64_t)))) {
185 		pr_err("Can't access ring base address\n");
186 		return -EFAULT;
187 	}
188 
189 	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
190 		pr_err("Ring size must be a power of 2 or 0\n");
191 		return -EINVAL;
192 	}
193 
194 	if (!access_ok((const void __user *) args->read_pointer_address,
195 			sizeof(uint32_t))) {
196 		pr_err("Can't access read pointer\n");
197 		return -EFAULT;
198 	}
199 
200 	if (!access_ok((const void __user *) args->write_pointer_address,
201 			sizeof(uint32_t))) {
202 		pr_err("Can't access write pointer\n");
203 		return -EFAULT;
204 	}
205 
206 	if (args->eop_buffer_address &&
207 		!access_ok((const void __user *) args->eop_buffer_address,
208 			sizeof(uint32_t))) {
209 		pr_debug("Can't access eop buffer");
210 		return -EFAULT;
211 	}
212 
213 	if (args->ctx_save_restore_address &&
214 		!access_ok((const void __user *) args->ctx_save_restore_address,
215 			sizeof(uint32_t))) {
216 		pr_debug("Can't access ctx save restore buffer");
217 		return -EFAULT;
218 	}
219 
220 	q_properties->is_interop = false;
221 	q_properties->queue_percent = args->queue_percentage;
222 	q_properties->priority = args->queue_priority;
223 	q_properties->queue_address = args->ring_base_address;
224 	q_properties->queue_size = args->ring_size;
225 	q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
226 	q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
227 	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
228 	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
229 	q_properties->ctx_save_restore_area_address =
230 			args->ctx_save_restore_address;
231 	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
232 	q_properties->ctl_stack_size = args->ctl_stack_size;
233 	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
234 		args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
235 		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
236 	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
237 		q_properties->type = KFD_QUEUE_TYPE_SDMA;
238 	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
239 		q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
240 	else
241 		return -ENOTSUPP;
242 
243 	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
244 		q_properties->format = KFD_QUEUE_FORMAT_AQL;
245 	else
246 		q_properties->format = KFD_QUEUE_FORMAT_PM4;
247 
248 	pr_debug("Queue Percentage: %d, %d\n",
249 			q_properties->queue_percent, args->queue_percentage);
250 
251 	pr_debug("Queue Priority: %d, %d\n",
252 			q_properties->priority, args->queue_priority);
253 
254 	pr_debug("Queue Address: 0x%llX, 0x%llX\n",
255 			q_properties->queue_address, args->ring_base_address);
256 
257 	pr_debug("Queue Size: 0x%llX, %u\n",
258 			q_properties->queue_size, args->ring_size);
259 
260 	pr_debug("Queue r/w Pointers: %px, %px\n",
261 			q_properties->read_ptr,
262 			q_properties->write_ptr);
263 
264 	pr_debug("Queue Format: %d\n", q_properties->format);
265 
266 	pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
267 
268 	pr_debug("Queue CTX save area: 0x%llX\n",
269 			q_properties->ctx_save_restore_area_address);
270 
271 	return 0;
272 }
273 
274 static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
275 					void *data)
276 {
277 	struct kfd_ioctl_create_queue_args *args = data;
278 	struct kfd_dev *dev;
279 	int err = 0;
280 	unsigned int queue_id;
281 	struct kfd_process_device *pdd;
282 	struct queue_properties q_properties;
283 	uint32_t doorbell_offset_in_process = 0;
284 
285 	memset(&q_properties, 0, sizeof(struct queue_properties));
286 
287 	pr_debug("Creating queue ioctl\n");
288 
289 	err = set_queue_properties_from_user(&q_properties, args);
290 	if (err)
291 		return err;
292 
293 	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
294 	dev = kfd_device_by_id(args->gpu_id);
295 	if (!dev) {
296 		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
297 		return -EINVAL;
298 	}
299 
300 	mutex_lock(&p->mutex);
301 
302 	pdd = kfd_bind_process_to_device(dev, p);
303 	if (IS_ERR(pdd)) {
304 		err = -ESRCH;
305 		goto err_bind_process;
306 	}
307 
308 	pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
309 			p->pasid,
310 			dev->id);
311 
312 	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
313 			&doorbell_offset_in_process);
314 	if (err != 0)
315 		goto err_create_queue;
316 
317 	args->queue_id = queue_id;
318 
319 
320 	/* Return gpu_id as doorbell offset for mmap usage */
321 	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
322 	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
323 	if (KFD_IS_SOC15(dev->device_info->asic_family))
324 		/* On SOC15 ASICs, include the doorbell offset within the
325 		 * process doorbell frame, which is 2 pages.
326 		 */
327 		args->doorbell_offset |= doorbell_offset_in_process;
328 
329 	mutex_unlock(&p->mutex);
330 
331 	pr_debug("Queue id %d was created successfully\n", args->queue_id);
332 
333 	pr_debug("Ring buffer address == 0x%016llX\n",
334 			args->ring_base_address);
335 
336 	pr_debug("Read ptr address    == 0x%016llX\n",
337 			args->read_pointer_address);
338 
339 	pr_debug("Write ptr address   == 0x%016llX\n",
340 			args->write_pointer_address);
341 
342 	return 0;
343 
344 err_create_queue:
345 err_bind_process:
346 	mutex_unlock(&p->mutex);
347 	return err;
348 }
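
/*
 * Illustrative userspace sketch of driving this ioctl (a minimal,
 * hedged example, not the authoritative runtime code): kfd_fd, gpu_id,
 * ring_buf, ring_size, rptr, wptr and doorbell_size are placeholders.
 * Assumes <linux/kfd_ioctl.h> and an already-opened /dev/kfd; error
 * handling is elided.
 *
 *	struct kfd_ioctl_create_queue_args args = {0};
 *
 *	args.gpu_id = gpu_id;
 *	args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
 *	args.queue_percentage = 100;
 *	args.queue_priority = 7;
 *	args.ring_base_address = (uint64_t)(uintptr_t)ring_buf;
 *	args.ring_size = ring_size;		// power of two, or 0
 *	args.read_pointer_address = (uint64_t)(uintptr_t)&rptr;
 *	args.write_pointer_address = (uint64_t)(uintptr_t)&wptr;
 *
 *	// doorbell_offset returned above is used directly as mmap offset
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args) == 0)
 *		doorbell = mmap(NULL, doorbell_size, PROT_READ | PROT_WRITE,
 *				MAP_SHARED, kfd_fd, args.doorbell_offset);
 */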
349 
350 static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
351 					void *data)
352 {
353 	int retval;
354 	struct kfd_ioctl_destroy_queue_args *args = data;
355 
356 	pr_debug("Destroying queue id %d for pasid 0x%x\n",
357 				args->queue_id,
358 				p->pasid);
359 
360 	mutex_lock(&p->mutex);
361 
362 	retval = pqm_destroy_queue(&p->pqm, args->queue_id);
363 
364 	mutex_unlock(&p->mutex);
365 	return retval;
366 }
367 
368 static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
369 					void *data)
370 {
371 	int retval;
372 	struct kfd_ioctl_update_queue_args *args = data;
373 	struct queue_properties properties;
374 
375 	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
376 		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
377 		return -EINVAL;
378 	}
379 
380 	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
381 		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
382 		return -EINVAL;
383 	}
384 
385 	if ((args->ring_base_address) &&
386 		(!access_ok((const void __user *) args->ring_base_address,
387 			sizeof(uint64_t)))) {
388 		pr_err("Can't access ring base address\n");
389 		return -EFAULT;
390 	}
391 
392 	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
393 		pr_err("Ring size must be a power of 2 or 0\n");
394 		return -EINVAL;
395 	}
396 
397 	properties.queue_address = args->ring_base_address;
398 	properties.queue_size = args->ring_size;
399 	properties.queue_percent = args->queue_percentage;
400 	properties.priority = args->queue_priority;
401 
402 	pr_debug("Updating queue id %d for pasid 0x%x\n",
403 			args->queue_id, p->pasid);
404 
405 	mutex_lock(&p->mutex);
406 
407 	retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
408 
409 	mutex_unlock(&p->mutex);
410 
411 	return retval;
412 }
413 
414 static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
415 					void *data)
416 {
417 	int retval;
418 	const int max_num_cus = 1024;
419 	struct kfd_ioctl_set_cu_mask_args *args = data;
420 	struct queue_properties properties;
421 	uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
422 	size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
423 
424 	if ((args->num_cu_mask % 32) != 0) {
425 		pr_debug("num_cu_mask 0x%x must be a multiple of 32",
426 				args->num_cu_mask);
427 		return -EINVAL;
428 	}
429 
430 	properties.cu_mask_count = args->num_cu_mask;
431 	if (properties.cu_mask_count == 0) {
432 		pr_debug("CU mask cannot be 0");
433 		return -EINVAL;
434 	}
435 
436 	/* To prevent an unreasonably large CU mask size, set an arbitrary
437 	 * limit of max_num_cus bits.  Any CU mask bits past that limit are
438 	 * dropped, and only the first max_num_cus bits are used.
439 	 */
440 	if (properties.cu_mask_count > max_num_cus) {
441 		pr_debug("CU mask cannot be greater than 1024 bits");
442 		properties.cu_mask_count = max_num_cus;
443 		cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
444 	}
445 
446 	properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
447 	if (!properties.cu_mask)
448 		return -ENOMEM;
449 
450 	retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
451 	if (retval) {
452 		pr_debug("Could not copy CU mask from userspace");
453 		kfree(properties.cu_mask);
454 		return -EFAULT;
455 	}
456 
457 	mutex_lock(&p->mutex);
458 
459 	retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
460 
461 	mutex_unlock(&p->mutex);
462 
463 	if (retval)
464 		kfree(properties.cu_mask);
465 
466 	return retval;
467 }
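
/*
 * Illustrative userspace sketch (hedged; kfd_fd and queue_id are
 * placeholders): enabling only CUs 0..47 with a 64-bit mask.
 *
 *	struct kfd_ioctl_set_cu_mask_args args = {0};
 *	uint32_t mask[2] = { 0xffffffff, 0x0000ffff };
 *
 *	args.queue_id = queue_id;
 *	args.num_cu_mask = 64;			// must be a multiple of 32
 *	args.cu_mask_ptr = (uint64_t)(uintptr_t)mask;
 *	ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
 */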
468 
469 static int kfd_ioctl_get_queue_wave_state(struct file *filep,
470 					  struct kfd_process *p, void *data)
471 {
472 	struct kfd_ioctl_get_queue_wave_state_args *args = data;
473 	int r;
474 
475 	mutex_lock(&p->mutex);
476 
477 	r = pqm_get_wave_state(&p->pqm, args->queue_id,
478 			       (void __user *)args->ctl_stack_address,
479 			       &args->ctl_stack_used_size,
480 			       &args->save_area_used_size);
481 
482 	mutex_unlock(&p->mutex);
483 
484 	return r;
485 }
486 
487 static int kfd_ioctl_set_memory_policy(struct file *filep,
488 					struct kfd_process *p, void *data)
489 {
490 	struct kfd_ioctl_set_memory_policy_args *args = data;
491 	struct kfd_dev *dev;
492 	int err = 0;
493 	struct kfd_process_device *pdd;
494 	enum cache_policy default_policy, alternate_policy;
495 
496 	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
497 	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
498 		return -EINVAL;
499 	}
500 
501 	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
502 	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
503 		return -EINVAL;
504 	}
505 
506 	dev = kfd_device_by_id(args->gpu_id);
507 	if (!dev)
508 		return -EINVAL;
509 
510 	mutex_lock(&p->mutex);
511 
512 	pdd = kfd_bind_process_to_device(dev, p);
513 	if (IS_ERR(pdd)) {
514 		err = -ESRCH;
515 		goto out;
516 	}
517 
518 	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
519 			 ? cache_policy_coherent : cache_policy_noncoherent;
520 
521 	alternate_policy =
522 		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
523 		   ? cache_policy_coherent : cache_policy_noncoherent;
524 
525 	if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
526 				&pdd->qpd,
527 				default_policy,
528 				alternate_policy,
529 				(void __user *)args->alternate_aperture_base,
530 				args->alternate_aperture_size))
531 		err = -EINVAL;
532 
533 out:
534 	mutex_unlock(&p->mutex);
535 
536 	return err;
537 }
538 
539 static int kfd_ioctl_set_trap_handler(struct file *filep,
540 					struct kfd_process *p, void *data)
541 {
542 	struct kfd_ioctl_set_trap_handler_args *args = data;
543 	struct kfd_dev *dev;
544 	int err = 0;
545 	struct kfd_process_device *pdd;
546 
547 	dev = kfd_device_by_id(args->gpu_id);
548 	if (!dev)
549 		return -EINVAL;
550 
551 	mutex_lock(&p->mutex);
552 
553 	pdd = kfd_bind_process_to_device(dev, p);
554 	if (IS_ERR(pdd)) {
555 		err = -ESRCH;
556 		goto out;
557 	}
558 
559 	if (dev->dqm->ops.set_trap_handler(dev->dqm,
560 					&pdd->qpd,
561 					args->tba_addr,
562 					args->tma_addr))
563 		err = -EINVAL;
564 
565 out:
566 	mutex_unlock(&p->mutex);
567 
568 	return err;
569 }
570 
571 static int kfd_ioctl_dbg_register(struct file *filep,
572 				struct kfd_process *p, void *data)
573 {
574 	struct kfd_ioctl_dbg_register_args *args = data;
575 	struct kfd_dev *dev;
576 	struct kfd_dbgmgr *dbgmgr_ptr;
577 	struct kfd_process_device *pdd;
578 	bool create_ok;
579 	long status = 0;
580 
581 	dev = kfd_device_by_id(args->gpu_id);
582 	if (!dev)
583 		return -EINVAL;
584 
585 	if (dev->device_info->asic_family == CHIP_CARRIZO) {
586 		pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
587 		return -EINVAL;
588 	}
589 
590 	mutex_lock(&p->mutex);
591 	mutex_lock(kfd_get_dbgmgr_mutex());
592 
593 	/*
594 	 * make sure that we have a pdd, in case this is the first queue
595 	 * created for this process
596 	 */
597 	pdd = kfd_bind_process_to_device(dev, p);
598 	if (IS_ERR(pdd)) {
599 		status = PTR_ERR(pdd);
600 		goto out;
601 	}
602 
603 	if (!dev->dbgmgr) {
604 		/* In case of a legal call, we have no dbgmgr yet */
605 		create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
606 		if (create_ok) {
607 			status = kfd_dbgmgr_register(dbgmgr_ptr, p);
608 			if (status != 0)
609 				kfd_dbgmgr_destroy(dbgmgr_ptr);
610 			else
611 				dev->dbgmgr = dbgmgr_ptr;
612 		}
613 	} else {
614 		pr_debug("debugger already registered\n");
615 		status = -EINVAL;
616 	}
617 
618 out:
619 	mutex_unlock(kfd_get_dbgmgr_mutex());
620 	mutex_unlock(&p->mutex);
621 
622 	return status;
623 }
624 
625 static int kfd_ioctl_dbg_unregister(struct file *filep,
626 				struct kfd_process *p, void *data)
627 {
628 	struct kfd_ioctl_dbg_unregister_args *args = data;
629 	struct kfd_dev *dev;
630 	long status;
631 
632 	dev = kfd_device_by_id(args->gpu_id);
633 	if (!dev || !dev->dbgmgr)
634 		return -EINVAL;
635 
636 	if (dev->device_info->asic_family == CHIP_CARRIZO) {
637 		pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
638 		return -EINVAL;
639 	}
640 
641 	mutex_lock(kfd_get_dbgmgr_mutex());
642 
643 	status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
644 	if (!status) {
645 		kfd_dbgmgr_destroy(dev->dbgmgr);
646 		dev->dbgmgr = NULL;
647 	}
648 
649 	mutex_unlock(kfd_get_dbgmgr_mutex());
650 
651 	return status;
652 }
653 
654 /*
655  * Parse and generate a variable-size data structure for address watch.
656  * The total buffer size and the number of watch points are limited in
657  * order to prevent kernel abuse (this has no bearing on the much smaller
658  * HW limitation, which is enforced by the dbgdev module).
659  * Please also note that the watch addresses themselves are not copied
660  * from user space, since they are programmed into the HW as user-mode values.
661  *
662  */
663 static int kfd_ioctl_dbg_address_watch(struct file *filep,
664 					struct kfd_process *p, void *data)
665 {
666 	struct kfd_ioctl_dbg_address_watch_args *args = data;
667 	struct kfd_dev *dev;
668 	struct dbg_address_watch_info aw_info;
669 	unsigned char *args_buff;
670 	long status;
671 	void __user *cmd_from_user;
672 	uint64_t watch_mask_value = 0;
673 	unsigned int args_idx = 0;
674 
675 	memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));
676 
677 	dev = kfd_device_by_id(args->gpu_id);
678 	if (!dev)
679 		return -EINVAL;
680 
681 	if (dev->device_info->asic_family == CHIP_CARRIZO) {
682 		pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
683 		return -EINVAL;
684 	}
685 
686 	cmd_from_user = (void __user *) args->content_ptr;
687 
688 	/* Validate arguments */
689 
690 	if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) ||
691 		(args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) ||
692 		(cmd_from_user == NULL))
693 		return -EINVAL;
694 
695 	/* this is the actual buffer to work with */
696 	args_buff = memdup_user(cmd_from_user,
697 				args->buf_size_in_bytes - sizeof(*args));
698 	if (IS_ERR(args_buff))
699 		return PTR_ERR(args_buff);
700 
701 	aw_info.process = p;
702 
703 	aw_info.num_watch_points = *((uint32_t *)(&args_buff[args_idx]));
704 	args_idx += sizeof(aw_info.num_watch_points);
705 
706 	aw_info.watch_mode = (enum HSA_DBG_WATCH_MODE *) &args_buff[args_idx];
707 	args_idx += sizeof(enum HSA_DBG_WATCH_MODE) * aw_info.num_watch_points;
708 
709 	/*
710 	 * set the watch address base pointer to point at the array base
711 	 * within args_buff
712 	 */
713 	aw_info.watch_address = (uint64_t *) &args_buff[args_idx];
714 
715 	/* skip over the addresses buffer */
716 	args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;
717 
718 	if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
719 		status = -EINVAL;
720 		goto out;
721 	}
722 
723 	watch_mask_value = (uint64_t) args_buff[args_idx];
724 
725 	if (watch_mask_value > 0) {
726 		/*
727 		 * There is an array of masks.
728 		 * set the watch mask base pointer to point at the array base
729 		 * within args_buff
730 		 */
731 		aw_info.watch_mask = (uint64_t *) &args_buff[args_idx];
732 
733 		/* skip over the masks buffer */
734 		args_idx += sizeof(aw_info.watch_mask) *
735 				aw_info.num_watch_points;
736 	} else {
737 		/* just the NULL mask, set to NULL and skip over it */
738 		aw_info.watch_mask = NULL;
739 		args_idx += sizeof(aw_info.watch_mask);
740 	}
741 
742 	if (args_idx >= args->buf_size_in_bytes - sizeof(args)) {
743 		status = -EINVAL;
744 		goto out;
745 	}
746 
747 	/* Currently HSA Event is not supported for DBG */
748 	aw_info.watch_event = NULL;
749 
750 	mutex_lock(kfd_get_dbgmgr_mutex());
751 
752 	status = kfd_dbgmgr_address_watch(dev->dbgmgr, &aw_info);
753 
754 	mutex_unlock(kfd_get_dbgmgr_mutex());
755 
756 out:
757 	kfree(args_buff);
758 
759 	return status;
760 }
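
/*
 * For reference, the payload layout implied by the parsing above (a
 * sketch inferred from this function, not a documented ABI; the payload
 * lives at content_ptr and is buf_size_in_bytes - sizeof(*args) long):
 *
 *	uint32_t            num_watch_points;
 *	HSA_DBG_WATCH_MODE  watch_mode[num_watch_points];
 *	uint64_t            watch_address[num_watch_points];
 *	uint64_t            watch_mask[num_watch_points];   // optional; a
 *	                    // leading zero byte means "no masks"
 */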
761 
762 /* Parse and generate fixed size data structure for wave control */
763 static int kfd_ioctl_dbg_wave_control(struct file *filep,
764 					struct kfd_process *p, void *data)
765 {
766 	struct kfd_ioctl_dbg_wave_control_args *args = data;
767 	struct kfd_dev *dev;
768 	struct dbg_wave_control_info wac_info;
769 	unsigned char *args_buff;
770 	uint32_t computed_buff_size;
771 	long status;
772 	void __user *cmd_from_user;
773 	unsigned int args_idx = 0;
774 
775 	memset((void *) &wac_info, 0, sizeof(struct dbg_wave_control_info));
776 
777 	/* we use compact form, independent of the packing attribute value */
778 	computed_buff_size = sizeof(*args) +
779 				sizeof(wac_info.mode) +
780 				sizeof(wac_info.operand) +
781 				sizeof(wac_info.dbgWave_msg.DbgWaveMsg) +
782 				sizeof(wac_info.dbgWave_msg.MemoryVA) +
783 				sizeof(wac_info.trapId);
784 
785 	dev = kfd_device_by_id(args->gpu_id);
786 	if (!dev)
787 		return -EINVAL;
788 
789 	if (dev->device_info->asic_family == CHIP_CARRIZO) {
790 		pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
791 		return -EINVAL;
792 	}
793 
794 	/* input size must match the computed "compact" size */
795 	if (args->buf_size_in_bytes != computed_buff_size) {
796 		pr_debug("size mismatch, computed : actual %u : %u\n",
797 				args->buf_size_in_bytes, computed_buff_size);
798 		return -EINVAL;
799 	}
800 
801 	cmd_from_user = (void __user *) args->content_ptr;
802 
803 	if (cmd_from_user == NULL)
804 		return -EINVAL;
805 
806 	/* copy the entire buffer from user */
807 
808 	args_buff = memdup_user(cmd_from_user,
809 				args->buf_size_in_bytes - sizeof(*args));
810 	if (IS_ERR(args_buff))
811 		return PTR_ERR(args_buff);
812 
813 	/* move ptr to the start of the payload area */
814 	wac_info.process = p;
815 
816 	wac_info.operand = *((enum HSA_DBG_WAVEOP *)(&args_buff[args_idx]));
817 	args_idx += sizeof(wac_info.operand);
818 
819 	wac_info.mode = *((enum HSA_DBG_WAVEMODE *)(&args_buff[args_idx]));
820 	args_idx += sizeof(wac_info.mode);
821 
822 	wac_info.trapId = *((uint32_t *)(&args_buff[args_idx]));
823 	args_idx += sizeof(wac_info.trapId);
824 
825 	wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value =
826 					*((uint32_t *)(&args_buff[args_idx]));
827 	wac_info.dbgWave_msg.MemoryVA = NULL;
828 
829 	mutex_lock(kfd_get_dbgmgr_mutex());
830 
831 	pr_debug("Calling dbg manager process %p, operand %u, mode %u, trapId %u, message %u\n",
832 			wac_info.process, wac_info.operand,
833 			wac_info.mode, wac_info.trapId,
834 			wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);
835 
836 	status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info);
837 
838 	pr_debug("Returned status of dbg manager is %ld\n", status);
839 
840 	mutex_unlock(kfd_get_dbgmgr_mutex());
841 
842 	kfree(args_buff);
843 
844 	return status;
845 }
846 
847 static int kfd_ioctl_get_clock_counters(struct file *filep,
848 				struct kfd_process *p, void *data)
849 {
850 	struct kfd_ioctl_get_clock_counters_args *args = data;
851 	struct kfd_dev *dev;
852 
853 	dev = kfd_device_by_id(args->gpu_id);
854 	if (dev)
855 		/* Reading GPU clock counter from KGD */
856 		args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd);
857 	else
858 		/* Node without GPU resource */
859 		args->gpu_clock_counter = 0;
860 
861 	/* No access to rdtsc. Using raw monotonic time */
862 	args->cpu_clock_counter = ktime_get_raw_ns();
863 	args->system_clock_counter = ktime_get_boottime_ns();
864 
865 	/* Since the counter is in nanoseconds, we use a 1 GHz frequency */
866 	args->system_clock_freq = 1000000000;
867 
868 	return 0;
869 }
870 
871 
872 static int kfd_ioctl_get_process_apertures(struct file *filp,
873 				struct kfd_process *p, void *data)
874 {
875 	struct kfd_ioctl_get_process_apertures_args *args = data;
876 	struct kfd_process_device_apertures *pAperture;
877 	struct kfd_process_device *pdd;
878 
879 	dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
880 
881 	args->num_of_nodes = 0;
882 
883 	mutex_lock(&p->mutex);
884 
885 	/* if the process-device list isn't empty */
886 	if (kfd_has_process_device_data(p)) {
887 		/* Run over all pdd of the process */
888 		pdd = kfd_get_first_process_device_data(p);
889 		do {
890 			pAperture =
891 				&args->process_apertures[args->num_of_nodes];
892 			pAperture->gpu_id = pdd->dev->id;
893 			pAperture->lds_base = pdd->lds_base;
894 			pAperture->lds_limit = pdd->lds_limit;
895 			pAperture->gpuvm_base = pdd->gpuvm_base;
896 			pAperture->gpuvm_limit = pdd->gpuvm_limit;
897 			pAperture->scratch_base = pdd->scratch_base;
898 			pAperture->scratch_limit = pdd->scratch_limit;
899 
900 			dev_dbg(kfd_device,
901 				"node id %u\n", args->num_of_nodes);
902 			dev_dbg(kfd_device,
903 				"gpu id %u\n", pdd->dev->id);
904 			dev_dbg(kfd_device,
905 				"lds_base %llX\n", pdd->lds_base);
906 			dev_dbg(kfd_device,
907 				"lds_limit %llX\n", pdd->lds_limit);
908 			dev_dbg(kfd_device,
909 				"gpuvm_base %llX\n", pdd->gpuvm_base);
910 			dev_dbg(kfd_device,
911 				"gpuvm_limit %llX\n", pdd->gpuvm_limit);
912 			dev_dbg(kfd_device,
913 				"scratch_base %llX\n", pdd->scratch_base);
914 			dev_dbg(kfd_device,
915 				"scratch_limit %llX\n", pdd->scratch_limit);
916 
917 			args->num_of_nodes++;
918 
919 			pdd = kfd_get_next_process_device_data(p, pdd);
920 		} while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
921 	}
922 
923 	mutex_unlock(&p->mutex);
924 
925 	return 0;
926 }
927 
928 static int kfd_ioctl_get_process_apertures_new(struct file *filp,
929 				struct kfd_process *p, void *data)
930 {
931 	struct kfd_ioctl_get_process_apertures_new_args *args = data;
932 	struct kfd_process_device_apertures *pa;
933 	struct kfd_process_device *pdd;
934 	uint32_t nodes = 0;
935 	int ret;
936 
937 	dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
938 
939 	if (args->num_of_nodes == 0) {
940 		/* Return number of nodes, so that user space can allocate
941 		 * sufficient memory
942 		 */
943 		mutex_lock(&p->mutex);
944 
945 		if (!kfd_has_process_device_data(p))
946 			goto out_unlock;
947 
948 		/* Run over all pdd of the process */
949 		pdd = kfd_get_first_process_device_data(p);
950 		do {
951 			args->num_of_nodes++;
952 			pdd = kfd_get_next_process_device_data(p, pdd);
953 		} while (pdd);
954 
955 		goto out_unlock;
956 	}
957 
958 	/* Fill in process-aperture information for all available
959 	 * nodes, but not more than args->num_of_nodes as that is
960 	 * the amount of memory allocated by the user
961 	 */
962 	pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
963 				args->num_of_nodes), GFP_KERNEL);
964 	if (!pa)
965 		return -ENOMEM;
966 
967 	mutex_lock(&p->mutex);
968 
969 	if (!kfd_has_process_device_data(p)) {
970 		args->num_of_nodes = 0;
971 		kfree(pa);
972 		goto out_unlock;
973 	}
974 
975 	/* Run over all pdd of the process */
976 	pdd = kfd_get_first_process_device_data(p);
977 	do {
978 		pa[nodes].gpu_id = pdd->dev->id;
979 		pa[nodes].lds_base = pdd->lds_base;
980 		pa[nodes].lds_limit = pdd->lds_limit;
981 		pa[nodes].gpuvm_base = pdd->gpuvm_base;
982 		pa[nodes].gpuvm_limit = pdd->gpuvm_limit;
983 		pa[nodes].scratch_base = pdd->scratch_base;
984 		pa[nodes].scratch_limit = pdd->scratch_limit;
985 
986 		dev_dbg(kfd_device,
987 			"gpu id %u\n", pdd->dev->id);
988 		dev_dbg(kfd_device,
989 			"lds_base %llX\n", pdd->lds_base);
990 		dev_dbg(kfd_device,
991 			"lds_limit %llX\n", pdd->lds_limit);
992 		dev_dbg(kfd_device,
993 			"gpuvm_base %llX\n", pdd->gpuvm_base);
994 		dev_dbg(kfd_device,
995 			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
996 		dev_dbg(kfd_device,
997 			"scratch_base %llX\n", pdd->scratch_base);
998 		dev_dbg(kfd_device,
999 			"scratch_limit %llX\n", pdd->scratch_limit);
1000 		nodes++;
1001 
1002 		pdd = kfd_get_next_process_device_data(p, pdd);
1003 	} while (pdd && (nodes < args->num_of_nodes));
1004 	mutex_unlock(&p->mutex);
1005 
1006 	args->num_of_nodes = nodes;
1007 	ret = copy_to_user(
1008 			(void __user *)args->kfd_process_device_apertures_ptr,
1009 			pa,
1010 			(nodes * sizeof(struct kfd_process_device_apertures)));
1011 	kfree(pa);
1012 	return ret ? -EFAULT : 0;
1013 
1014 out_unlock:
1015 	mutex_unlock(&p->mutex);
1016 	return 0;
1017 }
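
/*
 * Illustrative userspace sketch of the two-call pattern this ioctl
 * expects (hedged; kfd_fd is a placeholder, error handling elided):
 *
 *	struct kfd_ioctl_get_process_apertures_new_args args = {0};
 *	struct kfd_process_device_apertures *pa;
 *
 *	// first call with num_of_nodes == 0 just queries the count
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 *	pa = calloc(args.num_of_nodes, sizeof(*pa));
 *	args.kfd_process_device_apertures_ptr = (uint64_t)(uintptr_t)pa;
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 *	// pa[0 .. args.num_of_nodes-1] now holds per-GPU aperture info
 */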
1018 
1019 static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
1020 					void *data)
1021 {
1022 	struct kfd_ioctl_create_event_args *args = data;
1023 	int err;
1024 
1025 	/* For dGPUs the event page is allocated in user mode. The
1026 	 * handle is passed to KFD with the first call to this IOCTL
1027 	 * through the event_page_offset field.
1028 	 */
1029 	if (args->event_page_offset) {
1030 		struct kfd_dev *kfd;
1031 		struct kfd_process_device *pdd;
1032 		void *mem, *kern_addr;
1033 		uint64_t size;
1034 
1035 		if (p->signal_page) {
1036 			pr_err("Event page is already set\n");
1037 			return -EINVAL;
1038 		}
1039 
1040 		kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
1041 		if (!kfd) {
1042 			pr_err("Getting device by id failed in %s\n", __func__);
1043 			return -EINVAL;
1044 		}
1045 
1046 		mutex_lock(&p->mutex);
1047 		pdd = kfd_bind_process_to_device(kfd, p);
1048 		if (IS_ERR(pdd)) {
1049 			err = PTR_ERR(pdd);
1050 			goto out_unlock;
1051 		}
1052 
1053 		mem = kfd_process_device_translate_handle(pdd,
1054 				GET_IDR_HANDLE(args->event_page_offset));
1055 		if (!mem) {
1056 			pr_err("Can't find BO, offset is 0x%llx\n",
1057 			       args->event_page_offset);
1058 			err = -EINVAL;
1059 			goto out_unlock;
1060 		}
1061 		mutex_unlock(&p->mutex);
1062 
1063 		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
1064 						mem, &kern_addr, &size);
1065 		if (err) {
1066 			pr_err("Failed to map event page to kernel\n");
1067 			return err;
1068 		}
1069 
1070 		err = kfd_event_page_set(p, kern_addr, size);
1071 		if (err) {
1072 			pr_err("Failed to set event page\n");
1073 			return err;
1074 		}
1075 	}
1076 
1077 	err = kfd_event_create(filp, p, args->event_type,
1078 				args->auto_reset != 0, args->node_id,
1079 				&args->event_id, &args->event_trigger_data,
1080 				&args->event_page_offset,
1081 				&args->event_slot_index);
1082 
1083 	return err;
1084 
1085 out_unlock:
1086 	mutex_unlock(&p->mutex);
1087 	return err;
1088 }
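
/*
 * Illustrative dGPU flow for the event-page handshake described in the
 * comment above (a hedged sketch pieced together from this file, not
 * the authoritative runtime sequence; kfd_fd, gpu_id, va and page_size
 * are placeholders):
 *
 *	struct kfd_ioctl_alloc_memory_of_gpu_args alloc = {0};
 *	struct kfd_ioctl_create_event_args ev = {0};
 *
 *	alloc.gpu_id = gpu_id;
 *	alloc.va_addr = va;
 *	alloc.size = page_size;
 *	alloc.flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT;
 *	ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc);
 *
 *	// the returned handle is what GET_GPU_ID()/GET_IDR_HANDLE()
 *	// decode above on the first CREATE_EVENT call
 *	ev.event_page_offset = alloc.handle;
 *	ev.event_type = KFD_IOC_EVENT_SIGNAL;
 *	ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &ev);
 */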
1089 
1090 static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
1091 					void *data)
1092 {
1093 	struct kfd_ioctl_destroy_event_args *args = data;
1094 
1095 	return kfd_event_destroy(p, args->event_id);
1096 }
1097 
1098 static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
1099 				void *data)
1100 {
1101 	struct kfd_ioctl_set_event_args *args = data;
1102 
1103 	return kfd_set_event(p, args->event_id);
1104 }
1105 
1106 static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
1107 				void *data)
1108 {
1109 	struct kfd_ioctl_reset_event_args *args = data;
1110 
1111 	return kfd_reset_event(p, args->event_id);
1112 }
1113 
1114 static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
1115 				void *data)
1116 {
1117 	struct kfd_ioctl_wait_events_args *args = data;
1118 	int err;
1119 
1120 	err = kfd_wait_on_events(p, args->num_events,
1121 			(void __user *)args->events_ptr,
1122 			(args->wait_for_all != 0),
1123 			args->timeout, &args->wait_result);
1124 
1125 	return err;
1126 }
1127 static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
1128 					struct kfd_process *p, void *data)
1129 {
1130 	struct kfd_ioctl_set_scratch_backing_va_args *args = data;
1131 	struct kfd_process_device *pdd;
1132 	struct kfd_dev *dev;
1133 	long err;
1134 
1135 	dev = kfd_device_by_id(args->gpu_id);
1136 	if (!dev)
1137 		return -EINVAL;
1138 
1139 	mutex_lock(&p->mutex);
1140 
1141 	pdd = kfd_bind_process_to_device(dev, p);
1142 	if (IS_ERR(pdd)) {
1143 		err = PTR_ERR(pdd);
1144 		goto bind_process_to_device_fail;
1145 	}
1146 
1147 	pdd->qpd.sh_hidden_private_base = args->va_addr;
1148 
1149 	mutex_unlock(&p->mutex);
1150 
1151 	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
1152 	    pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
1153 		dev->kfd2kgd->set_scratch_backing_va(
1154 			dev->kgd, args->va_addr, pdd->qpd.vmid);
1155 
1156 	return 0;
1157 
1158 bind_process_to_device_fail:
1159 	mutex_unlock(&p->mutex);
1160 	return err;
1161 }
1162 
1163 static int kfd_ioctl_get_tile_config(struct file *filep,
1164 		struct kfd_process *p, void *data)
1165 {
1166 	struct kfd_ioctl_get_tile_config_args *args = data;
1167 	struct kfd_dev *dev;
1168 	struct tile_config config;
1169 	int err = 0;
1170 
1171 	dev = kfd_device_by_id(args->gpu_id);
1172 	if (!dev)
1173 		return -EINVAL;
1174 
1175 	dev->kfd2kgd->get_tile_config(dev->kgd, &config);
1176 
1177 	args->gb_addr_config = config.gb_addr_config;
1178 	args->num_banks = config.num_banks;
1179 	args->num_ranks = config.num_ranks;
1180 
1181 	if (args->num_tile_configs > config.num_tile_configs)
1182 		args->num_tile_configs = config.num_tile_configs;
1183 	err = copy_to_user((void __user *)args->tile_config_ptr,
1184 			config.tile_config_ptr,
1185 			args->num_tile_configs * sizeof(uint32_t));
1186 	if (err) {
1187 		args->num_tile_configs = 0;
1188 		return -EFAULT;
1189 	}
1190 
1191 	if (args->num_macro_tile_configs > config.num_macro_tile_configs)
1192 		args->num_macro_tile_configs =
1193 				config.num_macro_tile_configs;
1194 	err = copy_to_user((void __user *)args->macro_tile_config_ptr,
1195 			config.macro_tile_config_ptr,
1196 			args->num_macro_tile_configs * sizeof(uint32_t));
1197 	if (err) {
1198 		args->num_macro_tile_configs = 0;
1199 		return -EFAULT;
1200 	}
1201 
1202 	return 0;
1203 }
1204 
1205 static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
1206 				void *data)
1207 {
1208 	struct kfd_ioctl_acquire_vm_args *args = data;
1209 	struct kfd_process_device *pdd;
1210 	struct kfd_dev *dev;
1211 	struct file *drm_file;
1212 	int ret;
1213 
1214 	dev = kfd_device_by_id(args->gpu_id);
1215 	if (!dev)
1216 		return -EINVAL;
1217 
1218 	drm_file = fget(args->drm_fd);
1219 	if (!drm_file)
1220 		return -EINVAL;
1221 
1222 	mutex_lock(&p->mutex);
1223 
1224 	pdd = kfd_get_process_device_data(dev, p);
1225 	if (!pdd) {
1226 		ret = -EINVAL;
1227 		goto err_unlock;
1228 	}
1229 
1230 	if (pdd->drm_file) {
1231 		ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
1232 		goto err_unlock;
1233 	}
1234 
1235 	ret = kfd_process_device_init_vm(pdd, drm_file);
1236 	if (ret)
1237 		goto err_unlock;
1238 	/* On success, the PDD keeps the drm_file reference */
1239 	mutex_unlock(&p->mutex);
1240 
1241 	return 0;
1242 
1243 err_unlock:
1244 	mutex_unlock(&p->mutex);
1245 	fput(drm_file);
1246 	return ret;
1247 }
1248 
1249 bool kfd_dev_is_large_bar(struct kfd_dev *dev)
1250 {
1251 	struct kfd_local_mem_info mem_info;
1252 
1253 	if (debug_largebar) {
1254 		pr_debug("Simulate large-bar allocation on non large-bar machine\n");
1255 		return true;
1256 	}
1257 
1258 	if (dev->device_info->needs_iommu_device)
1259 		return false;
1260 
1261 	amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
1262 	if (mem_info.local_mem_size_private == 0 &&
1263 			mem_info.local_mem_size_public > 0)
1264 		return true;
1265 	return false;
1266 }
1267 
1268 static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
1269 					struct kfd_process *p, void *data)
1270 {
1271 	struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
1272 	struct kfd_process_device *pdd;
1273 	void *mem;
1274 	struct kfd_dev *dev;
1275 	int idr_handle;
1276 	long err;
1277 	uint64_t offset = args->mmap_offset;
1278 	uint32_t flags = args->flags;
1279 
1280 	if (args->size == 0)
1281 		return -EINVAL;
1282 
1283 	dev = kfd_device_by_id(args->gpu_id);
1284 	if (!dev)
1285 		return -EINVAL;
1286 
1287 	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
1288 		(flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
1289 		!kfd_dev_is_large_bar(dev)) {
1290 		pr_err("Alloc host visible vram on small bar is not allowed\n");
1291 		return -EINVAL;
1292 	}
1293 
1294 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
1295 		if (args->size != kfd_doorbell_process_slice(dev))
1296 			return -EINVAL;
1297 		offset = kfd_get_process_doorbells(dev, p);
1298 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
1299 		if (args->size != PAGE_SIZE)
1300 			return -EINVAL;
1301 		offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
1302 		if (!offset)
1303 			return -ENOMEM;
1304 	}
1305 
1306 	mutex_lock(&p->mutex);
1307 
1308 	pdd = kfd_bind_process_to_device(dev, p);
1309 	if (IS_ERR(pdd)) {
1310 		err = PTR_ERR(pdd);
1311 		goto err_unlock;
1312 	}
1313 
1314 	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1315 		dev->kgd, args->va_addr, args->size,
1316 		pdd->vm, (struct kgd_mem **) &mem, &offset,
1317 		flags);
1318 
1319 	if (err)
1320 		goto err_unlock;
1321 
1322 	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1323 	if (idr_handle < 0) {
1324 		err = -EFAULT;
1325 		goto err_free;
1326 	}
1327 
1328 	mutex_unlock(&p->mutex);
1329 
1330 	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1331 	args->mmap_offset = offset;
1332 
1333 	/* MMIO is mapped through kfd device
1334 	 * Generate a kfd mmap offset
1335 	 */
1336 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
1337 		args->mmap_offset = KFD_MMAP_TYPE_MMIO
1338 					| KFD_MMAP_GPU_ID(args->gpu_id);
1339 
1340 	return 0;
1341 
1342 err_free:
1343 	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
1344 err_unlock:
1345 	mutex_unlock(&p->mutex);
1346 	return err;
1347 }
1348 
1349 static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
1350 					struct kfd_process *p, void *data)
1351 {
1352 	struct kfd_ioctl_free_memory_of_gpu_args *args = data;
1353 	struct kfd_process_device *pdd;
1354 	void *mem;
1355 	struct kfd_dev *dev;
1356 	int ret;
1357 
1358 	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1359 	if (!dev)
1360 		return -EINVAL;
1361 
1362 	mutex_lock(&p->mutex);
1363 
1364 	pdd = kfd_get_process_device_data(dev, p);
1365 	if (!pdd) {
1366 		pr_err("Process device data doesn't exist\n");
1367 		ret = -EINVAL;
1368 		goto err_unlock;
1369 	}
1370 
1371 	mem = kfd_process_device_translate_handle(
1372 		pdd, GET_IDR_HANDLE(args->handle));
1373 	if (!mem) {
1374 		ret = -EINVAL;
1375 		goto err_unlock;
1376 	}
1377 
1378 	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
1379 						(struct kgd_mem *)mem);
1380 
1381 	/* If freeing the buffer failed, leave the handle in place for
1382 	 * clean-up during process tear-down.
1383 	 */
1384 	if (!ret)
1385 		kfd_process_device_remove_obj_handle(
1386 			pdd, GET_IDR_HANDLE(args->handle));
1387 
1388 err_unlock:
1389 	mutex_unlock(&p->mutex);
1390 	return ret;
1391 }
1392 
1393 static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
1394 					struct kfd_process *p, void *data)
1395 {
1396 	struct kfd_ioctl_map_memory_to_gpu_args *args = data;
1397 	struct kfd_process_device *pdd, *peer_pdd;
1398 	void *mem;
1399 	struct kfd_dev *dev, *peer;
1400 	long err = 0;
1401 	int i;
1402 	uint32_t *devices_arr = NULL;
1403 
1404 	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1405 	if (!dev)
1406 		return -EINVAL;
1407 
1408 	if (!args->n_devices) {
1409 		pr_debug("Device IDs array empty\n");
1410 		return -EINVAL;
1411 	}
1412 	if (args->n_success > args->n_devices) {
1413 		pr_debug("n_success exceeds n_devices\n");
1414 		return -EINVAL;
1415 	}
1416 
1417 	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1418 				    GFP_KERNEL);
1419 	if (!devices_arr)
1420 		return -ENOMEM;
1421 
1422 	err = copy_from_user(devices_arr,
1423 			     (void __user *)args->device_ids_array_ptr,
1424 			     args->n_devices * sizeof(*devices_arr));
1425 	if (err != 0) {
1426 		err = -EFAULT;
1427 		goto copy_from_user_failed;
1428 	}
1429 
1430 	mutex_lock(&p->mutex);
1431 
1432 	pdd = kfd_bind_process_to_device(dev, p);
1433 	if (IS_ERR(pdd)) {
1434 		err = PTR_ERR(pdd);
1435 		goto bind_process_to_device_failed;
1436 	}
1437 
1438 	mem = kfd_process_device_translate_handle(pdd,
1439 						GET_IDR_HANDLE(args->handle));
1440 	if (!mem) {
1441 		err = -ENOMEM;
1442 		goto get_mem_obj_from_handle_failed;
1443 	}
1444 
1445 	for (i = args->n_success; i < args->n_devices; i++) {
1446 		peer = kfd_device_by_id(devices_arr[i]);
1447 		if (!peer) {
1448 			pr_debug("Getting device by id failed for 0x%x\n",
1449 				 devices_arr[i]);
1450 			err = -EINVAL;
1451 			goto get_mem_obj_from_handle_failed;
1452 		}
1453 
1454 		peer_pdd = kfd_bind_process_to_device(peer, p);
1455 		if (IS_ERR(peer_pdd)) {
1456 			err = PTR_ERR(peer_pdd);
1457 			goto get_mem_obj_from_handle_failed;
1458 		}
1459 		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1460 			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
1461 		if (err) {
1462 			pr_err("Failed to map to gpu %d/%d\n",
1463 			       i, args->n_devices);
1464 			goto map_memory_to_gpu_failed;
1465 		}
1466 		args->n_success = i+1;
1467 	}
1468 
1469 	mutex_unlock(&p->mutex);
1470 
1471 	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
1472 	if (err) {
1473 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
1474 		goto sync_memory_failed;
1475 	}
1476 
1477 	/* Flush TLBs after waiting for the page table updates to complete */
1478 	for (i = 0; i < args->n_devices; i++) {
1479 		peer = kfd_device_by_id(devices_arr[i]);
1480 		if (WARN_ON_ONCE(!peer))
1481 			continue;
1482 		peer_pdd = kfd_get_process_device_data(peer, p);
1483 		if (WARN_ON_ONCE(!peer_pdd))
1484 			continue;
1485 		kfd_flush_tlb(peer_pdd);
1486 	}
1487 
1488 	kfree(devices_arr);
1489 
1490 	return err;
1491 
1492 bind_process_to_device_failed:
1493 get_mem_obj_from_handle_failed:
1494 map_memory_to_gpu_failed:
1495 	mutex_unlock(&p->mutex);
1496 copy_from_user_failed:
1497 sync_memory_failed:
1498 	kfree(devices_arr);
1499 
1500 	return err;
1501 }
1502 
1503 static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1504 					struct kfd_process *p, void *data)
1505 {
1506 	struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
1507 	struct kfd_process_device *pdd, *peer_pdd;
1508 	void *mem;
1509 	struct kfd_dev *dev, *peer;
1510 	long err = 0;
1511 	uint32_t *devices_arr = NULL, i;
1512 
1513 	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
1514 	if (!dev)
1515 		return -EINVAL;
1516 
1517 	if (!args->n_devices) {
1518 		pr_debug("Device IDs array empty\n");
1519 		return -EINVAL;
1520 	}
1521 	if (args->n_success > args->n_devices) {
1522 		pr_debug("n_success exceeds n_devices\n");
1523 		return -EINVAL;
1524 	}
1525 
1526 	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
1527 				    GFP_KERNEL);
1528 	if (!devices_arr)
1529 		return -ENOMEM;
1530 
1531 	err = copy_from_user(devices_arr,
1532 			     (void __user *)args->device_ids_array_ptr,
1533 			     args->n_devices * sizeof(*devices_arr));
1534 	if (err != 0) {
1535 		err = -EFAULT;
1536 		goto copy_from_user_failed;
1537 	}
1538 
1539 	mutex_lock(&p->mutex);
1540 
1541 	pdd = kfd_get_process_device_data(dev, p);
1542 	if (!pdd) {
1543 		err = -EINVAL;
1544 		goto bind_process_to_device_failed;
1545 	}
1546 
1547 	mem = kfd_process_device_translate_handle(pdd,
1548 						GET_IDR_HANDLE(args->handle));
1549 	if (!mem) {
1550 		err = -ENOMEM;
1551 		goto get_mem_obj_from_handle_failed;
1552 	}
1553 
1554 	for (i = args->n_success; i < args->n_devices; i++) {
1555 		peer = kfd_device_by_id(devices_arr[i]);
1556 		if (!peer) {
1557 			err = -EINVAL;
1558 			goto get_mem_obj_from_handle_failed;
1559 		}
1560 
1561 		peer_pdd = kfd_get_process_device_data(peer, p);
1562 		if (!peer_pdd) {
1563 			err = -ENODEV;
1564 			goto get_mem_obj_from_handle_failed;
1565 		}
1566 		err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1567 			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
1568 		if (err) {
1569 			pr_err("Failed to unmap from gpu %d/%d\n",
1570 			       i, args->n_devices);
1571 			goto unmap_memory_from_gpu_failed;
1572 		}
1573 		args->n_success = i+1;
1574 	}
1575 	kfree(devices_arr);
1576 
1577 	mutex_unlock(&p->mutex);
1578 
1579 	return 0;
1580 
1581 bind_process_to_device_failed:
1582 get_mem_obj_from_handle_failed:
1583 unmap_memory_from_gpu_failed:
1584 	mutex_unlock(&p->mutex);
1585 copy_from_user_failed:
1586 	kfree(devices_arr);
1587 	return err;
1588 }
1589 
1590 static int kfd_ioctl_get_dmabuf_info(struct file *filep,
1591 		struct kfd_process *p, void *data)
1592 {
1593 	struct kfd_ioctl_get_dmabuf_info_args *args = data;
1594 	struct kfd_dev *dev = NULL;
1595 	struct kgd_dev *dma_buf_kgd;
1596 	void *metadata_buffer = NULL;
1597 	uint32_t flags;
1598 	unsigned int i;
1599 	int r;
1600 
1601 	/* Find a KFD GPU device that supports the get_dmabuf_info query */
1602 	for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
1603 		if (dev)
1604 			break;
1605 	if (!dev)
1606 		return -EINVAL;
1607 
1608 	if (args->metadata_ptr) {
1609 		metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
1610 		if (!metadata_buffer)
1611 			return -ENOMEM;
1612 	}
1613 
1614 	/* Get dmabuf info from KGD */
1615 	r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
1616 					  &dma_buf_kgd, &args->size,
1617 					  metadata_buffer, args->metadata_size,
1618 					  &args->metadata_size, &flags);
1619 	if (r)
1620 		goto exit;
1621 
1622 	/* Reverse-lookup gpu_id from kgd pointer */
1623 	dev = kfd_device_by_kgd(dma_buf_kgd);
1624 	if (!dev) {
1625 		r = -EINVAL;
1626 		goto exit;
1627 	}
1628 	args->gpu_id = dev->id;
1629 	args->flags = flags;
1630 
1631 	/* Copy metadata buffer to user mode */
1632 	if (metadata_buffer) {
1633 		r = copy_to_user((void __user *)args->metadata_ptr,
1634 				 metadata_buffer, args->metadata_size);
1635 		if (r != 0)
1636 			r = -EFAULT;
1637 	}
1638 
1639 exit:
1640 	kfree(metadata_buffer);
1641 
1642 	return r;
1643 }
1644 
1645 static int kfd_ioctl_import_dmabuf(struct file *filep,
1646 				   struct kfd_process *p, void *data)
1647 {
1648 	struct kfd_ioctl_import_dmabuf_args *args = data;
1649 	struct kfd_process_device *pdd;
1650 	struct dma_buf *dmabuf;
1651 	struct kfd_dev *dev;
1652 	int idr_handle;
1653 	uint64_t size;
1654 	void *mem;
1655 	int r;
1656 
1657 	dev = kfd_device_by_id(args->gpu_id);
1658 	if (!dev)
1659 		return -EINVAL;
1660 
1661 	dmabuf = dma_buf_get(args->dmabuf_fd);
1662 	if (IS_ERR(dmabuf))
1663 		return PTR_ERR(dmabuf);
1664 
1665 	mutex_lock(&p->mutex);
1666 
1667 	pdd = kfd_bind_process_to_device(dev, p);
1668 	if (IS_ERR(pdd)) {
1669 		r = PTR_ERR(pdd);
1670 		goto err_unlock;
1671 	}
1672 
1673 	r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
1674 					      args->va_addr, pdd->vm,
1675 					      (struct kgd_mem **)&mem, &size,
1676 					      NULL);
1677 	if (r)
1678 		goto err_unlock;
1679 
1680 	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1681 	if (idr_handle < 0) {
1682 		r = -EFAULT;
1683 		goto err_free;
1684 	}
1685 
1686 	mutex_unlock(&p->mutex);
1687 
1688 	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
1689 
1690 	return 0;
1691 
1692 err_free:
1693 	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
1694 err_unlock:
1695 	mutex_unlock(&p->mutex);
1696 	return r;
1697 }
1698 
1699 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
1700 	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
1701 			    .cmd_drv = 0, .name = #ioctl}
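
/*
 * For reference, what one entry of the table below expands to (derived
 * directly from the macro above; a designated initializer indexed by
 * the ioctl's _IOC_NR):
 *
 *	[_IOC_NR(AMDKFD_IOC_GET_VERSION)] = {
 *		.cmd     = AMDKFD_IOC_GET_VERSION,
 *		.func    = kfd_ioctl_get_version,
 *		.flags   = 0,
 *		.cmd_drv = 0,
 *		.name    = "AMDKFD_IOC_GET_VERSION",
 *	},
 */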
1702 
1703 /** Ioctl table */
1704 static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
1705 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
1706 			kfd_ioctl_get_version, 0),
1707 
1708 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
1709 			kfd_ioctl_create_queue, 0),
1710 
1711 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
1712 			kfd_ioctl_destroy_queue, 0),
1713 
1714 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
1715 			kfd_ioctl_set_memory_policy, 0),
1716 
1717 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
1718 			kfd_ioctl_get_clock_counters, 0),
1719 
1720 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
1721 			kfd_ioctl_get_process_apertures, 0),
1722 
1723 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
1724 			kfd_ioctl_update_queue, 0),
1725 
1726 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
1727 			kfd_ioctl_create_event, 0),
1728 
1729 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
1730 			kfd_ioctl_destroy_event, 0),
1731 
1732 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
1733 			kfd_ioctl_set_event, 0),
1734 
1735 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
1736 			kfd_ioctl_reset_event, 0),
1737 
1738 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
1739 			kfd_ioctl_wait_events, 0),
1740 
1741 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER,
1742 			kfd_ioctl_dbg_register, 0),
1743 
1744 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER,
1745 			kfd_ioctl_dbg_unregister, 0),
1746 
1747 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH,
1748 			kfd_ioctl_dbg_address_watch, 0),
1749 
1750 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
1751 			kfd_ioctl_dbg_wave_control, 0),
1752 
1753 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
1754 			kfd_ioctl_set_scratch_backing_va, 0),
1755 
1756 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
1757 			kfd_ioctl_get_tile_config, 0),
1758 
1759 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
1760 			kfd_ioctl_set_trap_handler, 0),
1761 
1762 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
1763 			kfd_ioctl_get_process_apertures_new, 0),
1764 
1765 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
1766 			kfd_ioctl_acquire_vm, 0),
1767 
1768 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
1769 			kfd_ioctl_alloc_memory_of_gpu, 0),
1770 
1771 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
1772 			kfd_ioctl_free_memory_of_gpu, 0),
1773 
1774 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
1775 			kfd_ioctl_map_memory_to_gpu, 0),
1776 
1777 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
1778 			kfd_ioctl_unmap_memory_from_gpu, 0),
1779 
1780 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
1781 			kfd_ioctl_set_cu_mask, 0),
1782 
1783 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
1784 			kfd_ioctl_get_queue_wave_state, 0),
1785 
1786 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
1787 				kfd_ioctl_get_dmabuf_info, 0),
1788 
1789 	AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
1790 				kfd_ioctl_import_dmabuf, 0),
1791 
1792 };
1793 
1794 #define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)
1795 
1796 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
1797 {
1798 	struct kfd_process *process;
1799 	amdkfd_ioctl_t *func;
1800 	const struct amdkfd_ioctl_desc *ioctl = NULL;
1801 	unsigned int nr = _IOC_NR(cmd);
1802 	char stack_kdata[128];
1803 	char *kdata = NULL;
1804 	unsigned int usize, asize;
1805 	int retcode = -EINVAL;
1806 
1807 	if (nr >= AMDKFD_CORE_IOCTL_COUNT)
1808 		goto err_i1;
1809 
1810 	if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
1811 		u32 amdkfd_size;
1812 
1813 		ioctl = &amdkfd_ioctls[nr];
1814 
1815 		amdkfd_size = _IOC_SIZE(ioctl->cmd);
1816 		usize = asize = _IOC_SIZE(cmd);
1817 		if (amdkfd_size > asize)
1818 			asize = amdkfd_size;
1819 
1820 		cmd = ioctl->cmd;
1821 	} else
1822 		goto err_i1;
1823 
1824 	dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
1825 
1826 	/* Get the process struct from the filep. Only the process
1827 	 * that opened /dev/kfd can use the file descriptor. Child
1828 	 * processes need to create their own KFD device context.
1829 	 */
1830 	process = filep->private_data;
1831 	if (process->lead_thread != current->group_leader) {
1832 		dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
1833 		retcode = -EBADF;
1834 		goto err_i1;
1835 	}
1836 
1837 	/* Do not trust userspace, use our own definition */
1838 	func = ioctl->func;
1839 
1840 	if (unlikely(!func)) {
1841 		dev_dbg(kfd_device, "no function\n");
1842 		retcode = -EINVAL;
1843 		goto err_i1;
1844 	}
1845 
1846 	if (cmd & (IOC_IN | IOC_OUT)) {
1847 		if (asize <= sizeof(stack_kdata)) {
1848 			kdata = stack_kdata;
1849 		} else {
1850 			kdata = kmalloc(asize, GFP_KERNEL);
1851 			if (!kdata) {
1852 				retcode = -ENOMEM;
1853 				goto err_i1;
1854 			}
1855 		}
1856 		if (asize > usize)
1857 			memset(kdata + usize, 0, asize - usize);
1858 	}
1859 
1860 	if (cmd & IOC_IN) {
1861 		if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
1862 			retcode = -EFAULT;
1863 			goto err_i1;
1864 		}
1865 	} else if (cmd & IOC_OUT) {
1866 		memset(kdata, 0, usize);
1867 	}
1868 
1869 	retcode = func(filep, process, kdata);
1870 
1871 	if (cmd & IOC_OUT)
1872 		if (copy_to_user((void __user *)arg, kdata, usize) != 0)
1873 			retcode = -EFAULT;
1874 
1875 err_i1:
1876 	if (!ioctl)
1877 		dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
1878 			  task_pid_nr(current), cmd, nr);
1879 
1880 	if (kdata != stack_kdata)
1881 		kfree(kdata);
1882 
1883 	if (retcode)
1884 		dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
1885 				nr, arg, retcode);
1886 
1887 	return retcode;
1888 }
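
/*
 * A worked example of the usize/asize handling above: if an old binary
 * passes a 24-byte struct for a command whose current definition is 32
 * bytes, then usize = 24 and asize = 32.  kdata is allocated with 32
 * bytes, bytes 24..31 are zeroed, only 24 bytes are copied in from user
 * space, and only 24 bytes are copied back out -- so older userspace
 * keeps working against a grown ioctl struct.
 */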
1889 
1890 static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
1891 		      struct vm_area_struct *vma)
1892 {
1893 	phys_addr_t address;
1894 	int ret;
1895 
1896 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1897 		return -EINVAL;
1898 
1899 	address = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
1900 
1901 	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
1902 				VM_DONTDUMP | VM_PFNMAP;
1903 
1904 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1905 
1906 	pr_debug("pasid 0x%x mapping mmio page\n"
1907 		 "     target user address == 0x%08llX\n"
1908 		 "     physical address    == 0x%08llX\n"
1909 		 "     vm_flags            == 0x%04lX\n"
1910 		 "     size                == 0x%04lX\n",
1911 		 process->pasid, (unsigned long long) vma->vm_start,
1912 		 address, vma->vm_flags, PAGE_SIZE);
1913 
1914 	ret = io_remap_pfn_range(vma,
1915 				vma->vm_start,
1916 				address >> PAGE_SHIFT,
1917 				PAGE_SIZE,
1918 				vma->vm_page_prot);
1919 	return ret;
1920 }
1921 
1922 
1923 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
1924 {
1925 	struct kfd_process *process;
1926 	struct kfd_dev *dev = NULL;
1927 	unsigned long mmap_offset;
1928 	unsigned int gpu_id;
1929 
1930 	process = kfd_get_process(current);
1931 	if (IS_ERR(process))
1932 		return PTR_ERR(process);
1933 
1934 	mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
1935 	gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
1936 	if (gpu_id)
1937 		dev = kfd_device_by_id(gpu_id);
1938 
1939 	switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
1940 	case KFD_MMAP_TYPE_DOORBELL:
1941 		if (!dev)
1942 			return -ENODEV;
1943 		return kfd_doorbell_mmap(dev, process, vma);
1944 
1945 	case KFD_MMAP_TYPE_EVENTS:
1946 		return kfd_event_mmap(process, vma);
1947 
1948 	case KFD_MMAP_TYPE_RESERVED_MEM:
1949 		if (!dev)
1950 			return -ENODEV;
1951 		return kfd_reserved_mem_mmap(dev, process, vma);
1952 	case KFD_MMAP_TYPE_MMIO:
1953 		if (!dev)
1954 			return -ENODEV;
1955 		return kfd_mmio_mmap(dev, process, vma);
1956 	}
1957 
1958 	return -EFAULT;
1959 }
1960