/* xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/amdgpu_amdkfd.c (revision 82d56013d7b633d116a93943de88e08335357a7c) */
/*	$NetBSD: amdgpu_amdkfd.c,v 1.3 2018/08/27 14:04:50 riastradh Exp $	*/

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_amdkfd.c,v 1.3 2018/08/27 14:04:50 riastradh Exp $");

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include <linux/module.h>

/*
 * Dispatch tables exchanged between the graphics driver (kgd) and the
 * HSA compute driver amdkfd (kfd).  Both are set by
 * amdgpu_amdkfd_load_interface() and cleared on failure/teardown.
 */
const struct kfd2kgd_calls *kfd2kgd;	/* amdgpu services exported to amdkfd */
const struct kgd2kfd_calls *kgd2kfd;	/* amdkfd entry points used by amdgpu */
/* NOTE(review): this file-scope pointer is shadowed by identically named
 * locals in the functions below and is never assigned here — confirm it is
 * required by an external declaration before removing. */
bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

/*
 * amdgpu_amdkfd_init - one-time setup of the amdgpu/amdkfd bridge at
 * driver load.
 *
 * When amdkfd is built as a separate module (CONFIG_HSA_AMD_MODULE), take a
 * module reference on its kgd2kfd_init entry point via symbol_request();
 * the reference is dropped later with symbol_put() (see load_interface and
 * fini).  Returns false if the amdkfd module is unavailable, true otherwise
 * (and unconditionally true when amdkfd is built in or absent).
 */
bool amdgpu_amdkfd_init(void)
{
#if defined(CONFIG_HSA_AMD_MODULE)
	/* NOTE(review): shadows the file-scope kgd2kfd_init_p; the pointer
	 * is only used for the NULL check here, the symbol reference itself
	 * is what this call is really taking. */
	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return false;
#endif
	return true;
}

/*
 * amdgpu_amdkfd_load_interface - bind the kgd<->kfd dispatch tables for rdev.
 *
 * Selects the ASIC-specific kfd2kgd function table (GFX7/Kaveri vs.
 * GFX8/Carrizo) and asks amdkfd to hand back its kgd2kfd table via
 * kgd2kfd_init().  Returns true on success; on any failure both global
 * table pointers are reset to NULL and false is returned.
 */
bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
{
#if defined(CONFIG_HSA_AMD_MODULE)
	/* NOTE(review): shadows the file-scope kgd2kfd_init_p. */
	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
#endif

	/* Pick the graphics-side function table for this ASIC generation. */
	switch (rdev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	default:
		/* ASIC not supported by amdkfd. */
		return false;
	}

#if defined(CONFIG_HSA_AMD_MODULE)
	/* amdkfd is a separate module: take a reference on its init symbol. */
	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL) {
		kfd2kgd = NULL;
		return false;
	}

	if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
		/* Interface version rejected: drop the symbol reference. */
		symbol_put(kgd2kfd_init);
		kfd2kgd = NULL;
		kgd2kfd = NULL;

		return false;
	}

	return true;
#elif defined(CONFIG_HSA_AMD)
	/* amdkfd is built in: call its init entry point directly. */
	if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
		kfd2kgd = NULL;
		kgd2kfd = NULL;
		return false;
	}

	return true;
#else
	/* No HSA/amdkfd support configured. */
	kfd2kgd = NULL;
	return false;
#endif
}

101 void amdgpu_amdkfd_fini(void)
102 {
103 	if (kgd2kfd) {
104 		kgd2kfd->exit();
105 #ifndef __NetBSD__
106 		symbol_put(kgd2kfd_init);
107 #endif
108 	}
109 }
110 
111 void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
112 {
113 	if (kgd2kfd)
114 		rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
115 					rdev->pdev, kfd2kgd);
116 }
117 
118 void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
119 {
120 	if (rdev->kfd) {
121 		struct kgd2kfd_shared_resources gpu_resources = {
122 			.compute_vmid_bitmap = 0xFF00,
123 
124 			.first_compute_pipe = 1,
125 			.compute_pipe_count = 4 - 1,
126 		};
127 
128 		amdgpu_doorbell_get_kfd_info(rdev,
129 				&gpu_resources.doorbell_physical_address,
130 				&gpu_resources.doorbell_aperture_size,
131 				&gpu_resources.doorbell_start_offset);
132 
133 		kgd2kfd->device_init(rdev->kfd, &gpu_resources);
134 	}
135 }
136 
137 void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
138 {
139 	if (rdev->kfd) {
140 		kgd2kfd->device_exit(rdev->kfd);
141 		rdev->kfd = NULL;
142 	}
143 }
144 
145 void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
146 		const void *ih_ring_entry)
147 {
148 	if (rdev->kfd)
149 		kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
150 }
151 
152 void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
153 {
154 	if (rdev->kfd)
155 		kgd2kfd->suspend(rdev->kfd);
156 }
157 
158 int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
159 {
160 	int r = 0;
161 
162 	if (rdev->kfd)
163 		r = kgd2kfd->resume(rdev->kfd);
164 
165 	return r;
166 }
167 
168 #ifndef __NetBSD__		/* XXX unused? */
169 u32 pool_to_domain(enum kgd_memory_pool p)
170 {
171 	switch (p) {
172 	case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
173 	default: return AMDGPU_GEM_DOMAIN_GTT;
174 	}
175 }
176 #endif
177 
178 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
179 			void **mem_obj, uint64_t *gpu_addr,
180 			void **cpu_ptr)
181 {
182 	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
183 	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
184 	int r;
185 
186 	BUG_ON(kgd == NULL);
187 	BUG_ON(gpu_addr == NULL);
188 	BUG_ON(cpu_ptr == NULL);
189 
190 	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
191 	if ((*mem) == NULL)
192 		return -ENOMEM;
193 
194 	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
195 			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
196 	if (r) {
197 		dev_err(rdev->dev,
198 			"failed to allocate BO for amdkfd (%d)\n", r);
199 		return r;
200 	}
201 
202 	/* map the buffer */
203 	r = amdgpu_bo_reserve((*mem)->bo, true);
204 	if (r) {
205 		dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
206 		goto allocate_mem_reserve_bo_failed;
207 	}
208 
209 	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
210 				&(*mem)->gpu_addr);
211 	if (r) {
212 		dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
213 		goto allocate_mem_pin_bo_failed;
214 	}
215 	*gpu_addr = (*mem)->gpu_addr;
216 
217 	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
218 	if (r) {
219 		dev_err(rdev->dev,
220 			"(%d) failed to map bo to kernel for amdkfd\n", r);
221 		goto allocate_mem_kmap_bo_failed;
222 	}
223 	*cpu_ptr = (*mem)->cpu_ptr;
224 
225 	amdgpu_bo_unreserve((*mem)->bo);
226 
227 	return 0;
228 
229 allocate_mem_kmap_bo_failed:
230 	amdgpu_bo_unpin((*mem)->bo);
231 allocate_mem_pin_bo_failed:
232 	amdgpu_bo_unreserve((*mem)->bo);
233 allocate_mem_reserve_bo_failed:
234 	amdgpu_bo_unref(&(*mem)->bo);
235 
236 	return r;
237 }
238 
239 void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
240 {
241 	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
242 
243 	BUG_ON(mem == NULL);
244 
245 	amdgpu_bo_reserve(mem->bo, true);
246 	amdgpu_bo_kunmap(mem->bo);
247 	amdgpu_bo_unpin(mem->bo);
248 	amdgpu_bo_unreserve(mem->bo);
249 	amdgpu_bo_unref(&(mem->bo));
250 	kfree(mem);
251 }
252 
253 uint64_t get_vmem_size(struct kgd_dev *kgd)
254 {
255 	struct amdgpu_device *rdev =
256 		(struct amdgpu_device *)kgd;
257 
258 	BUG_ON(kgd == NULL);
259 
260 	return rdev->mc.real_vram_size;
261 }
262 
263 uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
264 {
265 	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
266 
267 	if (rdev->asic_funcs->get_gpu_clock_counter)
268 		return rdev->asic_funcs->get_gpu_clock_counter(rdev);
269 	return 0;
270 }
271 
272 uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
273 {
274 	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
275 
276 	/* The sclk is in quantas of 10kHz */
277 	return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
278 }
279