/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * Tears down the driver state and frees the amdgpu device.
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;
	dev->dev_private = (void *)adev;

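	/*
	 * Decide whether to treat this device as a PX (PowerXpress/hybrid
	 * graphics) part for runtime PM: the ATPX ACPI method must be
	 * present, the platform must offer hybrid graphics or dGPU power
	 * control, the GPU must be discrete (not an APU), and it must not
	 * sit behind Thunderbolt, since eGPU enclosures manage power
	 * themselves.
	 */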
	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init() should report only fatal errors (e.g. memory
	 * allocation, iomapping or memory manager initialization failures);
	 * on success it must have initialized the GPU MC controller and
	 * permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: these require modeset init,
	 * but failure is not fatal.
	 */
	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");

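	/*
	 * For PX parts, hand the device over to runtime PM with
	 * autosuspend: after the pm_runtime_put_autosuspend() below, a
	 * 5 second idle period lets the dGPU power down until the next use.
	 */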
	if (amdgpu_device_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}

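/*
 * Look up the cached firmware/feature version for one IP block. The
 * versions are recorded by the per-IP code when firmware is loaded; this
 * helper only translates the UAPI query type into the right adev field.
 */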
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else {
			return -EINVAL;
		}
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Userspace get information ioctl
 */
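/*
 * A minimal userspace sketch (illustrative only; real code usually goes
 * through the libdrm_amdgpu wrappers such as amdgpu_query_info(), and
 * error handling is omitted here):
 *
 *	struct drm_amdgpu_info request = {};
 *	uint64_t vram_usage = 0;
 *
 *	request.return_pointer = (uintptr_t)&vram_usage;
 *	request.return_size = sizeof(vram_usage);
 *	request.query = AMDGPU_INFO_VRAM_USAGE;
 *	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) == 0)
 *		printf("VRAM in use: %" PRIu64 " bytes\n", vram_usage);
 *
 * where fd is an open render-node file descriptor.
 */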
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm dev pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, j, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

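	/* Every query below copies at most min(return_size, sizeof(result))
	 * bytes back to userspace and maps any fault to -EFAULT, so a caller
	 * with a short buffer gets a truncated result rather than an error.
	 */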
	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		enum amd_ip_block_type type;
		uint32_t ring_mask = 0;
		uint32_t ib_start_alignment = 0;
		uint32_t ib_size_alignment = 0;

		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
			return -EINVAL;

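		/*
		 * For each ring of the requested IP type, set a bit in
		 * ring_mask when that ring came up ready, and report the IB
		 * start/size alignment constraints userspace must honor when
		 * submitting to this IP.
		 */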
		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
				ring_mask |= adev->gfx.gfx_ring[i].ready << i;
			ib_start_alignment = 32;
			ib_size_alignment = 32;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_compute_rings; i++)
				ring_mask |= adev->gfx.compute_ring[i].ready << i;
			ib_start_alignment = 32;
			ib_size_alignment = 32;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			for (i = 0; i < adev->sdma.num_instances; i++)
				ring_mask |= adev->sdma.instance[i].ring.ready << i;
			ib_start_alignment = 256;
			ib_size_alignment = 4;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
				if (adev->uvd.harvest_config & (1 << i))
					continue;
				ring_mask |= adev->uvd.inst[i].ring.ready;
			}
			ib_start_alignment = 64;
			ib_size_alignment = 64;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			for (i = 0; i < adev->vce.num_rings; i++)
				ring_mask |= adev->vce.ring[i].ready << i;
			ib_start_alignment = 4;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
				if (adev->uvd.harvest_config & (1 << i))
					continue;
				for (j = 0; j < adev->uvd.num_enc_rings; j++)
					ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
			}
			ib_start_alignment = 64;
			ib_size_alignment = 64;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			ring_mask = adev->vcn.ring_dec.ready;
			ib_start_alignment = 16;
			ib_size_alignment = 16;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			for (i = 0; i < adev->vcn.num_enc_rings; i++)
				ring_mask |= adev->vcn.ring_enc[i].ready << i;
			ib_start_alignment = 64;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			ring_mask = adev->vcn.ring_jpeg.ready;
			ib_start_alignment = 16;
			ib_size_alignment = 16;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid) {
				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
				ip.capabilities_flags = 0;
				ip.available_rings = ring_mask;
				ip.ib_start_alignment = ib_start_alignment;
				ip.ib_size_alignment = ib_size_alignment;
				break;
			}
		}
		return copy_to_user(out, &ip,
				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if userspace set all bits
		 * in the bitfields
		 */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		else if (se_num >= AMDGPU_GFX_MAX_SE)
			return -EINVAL;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;
		else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
			return -EINVAL;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info;
		uint64_t vm_size;

		memset(&dev_info, 0, sizeof(dev_info));
		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40 bits */
		if (adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_VA_HOLE_START);

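		/*
		 * ASICs with a large VA space have a hole of non-canonical
		 * addresses in the middle of the range; if the VM extends
		 * past AMDGPU_VA_HOLE_START, the remainder is reported to
		 * userspace as a second "high" range above AMDGPU_VA_HOLE_END.
		 */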
		if (vm_size > AMDGPU_VA_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
			dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;

		if (amdgpu_ngg) {
			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
		}
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					min((size_t)size, sizeof(bios_size)))
					? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
					info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting with Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, initialize the per-file VM and driver state (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->late_init_work);

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		goto pm_put;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
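	/*
	 * A PASID of 0 is treated as "no PASID": the VM still works, the
	 * process just cannot be matched to faults reported by the hardware.
	 */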
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
pm_put:
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down the per-file VM and driver state (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (adev->asic_type != CHIP_RAVEN) {
		amdgpu_uvd_free_handles(adev, file_priv);
		amdgpu_vce_free_handles(adev, file_priv);
	}

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

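	/*
	 * Hold a reference on the page directory BO across amdgpu_vm_fini()
	 * so the PASID is released only after all fences on the PD's
	 * reservation object have signaled; otherwise in-flight work could
	 * still raise faults tagged with a recycled PASID.
	 */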
	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_vm_fini(adev, &fpriv->vm);
	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
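	/* Example: if the hw counter reads N while vpos >= 0 (inside vblank,
	 * before vsync), the cooked count returned below is N + 1.
	 */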
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
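/*
 * This table is handed to the DRM core through the driver's .ioctls /
 * .num_ioctls fields (in amdgpu_drv.c upstream). DRM_AUTH entries require
 * a client authenticated against the DRM master, while DRM_RENDER_ALLOW
 * entries are also reachable through render nodes.
 */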

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}