/*	$NetBSD: amdgpu_acp.c,v 1.3 2021/12/19 10:59:01 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_acp.c,v 1.3 2021/12/19 10:59:01 riastradh Exp $");

#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK			0x03
#define ACP_TILE_OFF_MASK			0x02
#define ACP_TILE_ON_RETAIN_REG_MASK		0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK		0x20

#define ACP_TILE_P1_MASK			0x3e
#define ACP_TILE_P2_MASK			0x3d
#define ACP_TILE_DSP0_MASK			0x3b
#define ACP_TILE_DSP1_MASK			0x37

#define ACP_TILE_DSP2_MASK			0x2f

#define ACP_DMA_REGS_END			0x146c0
#define ACP_I2S_PLAY_REGS_START			0x14840
#define ACP_I2S_PLAY_REGS_END			0x148b4
#define ACP_I2S_CAP_REGS_START			0x148b8
#define ACP_I2S_CAP_REGS_END			0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET		0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET		0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET		0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET		0x68
#define ACP_BT_PLAY_REGS_START			0x14970
#define ACP_BT_PLAY_REGS_END			0x14a24
#define ACP_BT_COMP1_REG_OFFSET			0xac
#define ACP_BT_COMP2_REG_OFFSET			0xa8

#define mmACP_PGFSM_RETAIN_REG			0x51c9
#define mmACP_PGFSM_CONFIG_REG			0x51ca
#define mmACP_PGFSM_READ_REG_0			0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO		0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI		0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO		0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI		0x51fb

#define mmACP_CONTROL				0x5131
#define mmACP_STATUS				0x5133
#define mmACP_SOFT_RESET			0x5134
#define ACP_CONTROL__ClkEn_MASK			0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK	0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF

#define ACP_TIMEOUT_LOOP			0x000000FF
#define ACP_DEVS				4
#define ACP_SRC_ID				162

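/*
 * Indices for the ACP power tiles (two "P" tiles and three DSP tiles),
 * matching the ACP_TILE_*_MASK values above.  In this version of the
 * driver the tiles are no longer gated by hand through the PGFSM
 * registers; power gating is requested from the SMU instead, so these
 * are kept mainly for reference.
 */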
enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

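/*
 * sw_init/sw_fini: create and tear down the CGS (common graphics
 * services) device handle that the rest of this file uses to reach
 * the ACP registers via cgs_read_register()/cgs_write_register().
 */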
static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

#ifndef __NetBSD__		/* XXX amdgpu pm */

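/*
 * Wraps a generic_pm_domain so that the power_on/power_off callbacks
 * below can recover the owning amdgpu_device with container_of().
 */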
struct acp_pm_domain {
	void *adev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
	/* call smu to POWER GATE ACP block
	 * smu will
	 * 1. turn off the acp clock
	 * 2. power off the acp tiles
	 * 3. check and enter ulv state
	 */
		if (adev->powerplay.pp_funcs &&
			adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	}
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
	/* call smu to UNGATE ACP block
	 * smu will
	 * 1. exit ulv
	 * 2. turn on acp clock
	 * 3. power on acp tiles
	 */
		if (adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	}
	return 0;
}

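/*
 * Look up the platform device that the MFD core registered for cell
 * @device_name, instance @r.  mfd_add_hotplug_devices() uses automatic
 * platform device IDs, so the resulting device names have the form
 * "<cell name>.<id>.auto".
 */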
static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

#endif

/**
 * acp_hw_init - start and test ACP block
 *
 * @adev: amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata = NULL;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
		return 0;
	} else if (r) {
		return r;
	}

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;


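/*
 * Create a generic PM domain ("ACP_AUDIO") whose power_on/power_off
 * hooks ask the SMU to ungate/gate the ACP block; the MFD cell
 * devices created further down are attached to this domain.
 */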
#ifndef __NetBSD__		/* XXX amdgpu pm */
	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (adev->acp.acp_genpd == NULL)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;


	adev->acp.acp_genpd->adev = adev;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
#endif

#ifndef __NetBSD__		/* XXX amdgpu cell */
	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
							GFP_KERNEL);

	if (adev->acp.acp_cell == NULL) {
		r = -ENOMEM;
		goto failure;
	}
#endif

	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
	if (adev->acp.acp_res == NULL) {
		r = -ENOMEM;
		goto failure;
	}

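/*
 * Platform data for the three designware-i2s cells (not built on
 * NetBSD yet): [0] playback, [1] capture, [2] the BT I2S instance
 * (playback and capture).  Stoney parts additionally get
 * DW_I2S_QUIRK_16BIT_IDX_OVERRIDE.
 */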
#ifdef __NetBSD__		/* XXX amdgpu sound */
	__USE(i2s_pdata);
#else
	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		break;
	}

	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
#endif

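/*
 * Five resources handed out to the MFD cells: four MMIO windows
 * (ACP DMA, I2S playback, I2S capture, BT I2S) carved out of the GPU
 * register BAR, plus one interrupt obtained by mapping ACP client
 * id 162 (ACP_SRC_ID above) through the amdgpu IRQ domain.
 */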
	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

	adev->acp.acp_res[4].name = "acp2x_dma_irq";
	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

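/*
 * Register the MFD cells (the ACP DMA engine plus three designware
 * I2S controllers) and attach each resulting platform device to the
 * ACP power domain, so that runtime PM on the audio devices can
 * power-gate the whole block.  Not wired up on NetBSD yet.
 */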
#ifdef __NetBSD__		/* XXX amdgpu cell */
	__USE(dev);
	__USE(i);
#else
	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 5;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[3].name = "designware-i2s";
	adev->acp.acp_cell[3].num_resources = 1;
	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
								ACP_DEVS);
	if (r)
		goto failure;

	for (i = 0; i < ACP_DEVS ; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
		if (r) {
			dev_err(dev, "Failed to add dev to genpd\n");
			goto failure;
		}
	}
#endif

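/*
 * Bring the block up: take it out of soft reset and enable its clock.
 * Each poll loop below retries up to 0xff times with a 100us delay,
 * i.e. roughly 25ms, before giving up with -ETIMEDOUT.
 */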
	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(pci_dev_dev(adev->pdev), "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(pci_dev_dev(adev->pdev), "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
	return 0;

failure:
	kfree(i2s_pdata);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);
	kfree(adev->acp.acp_genpd);
	return r;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
		return 0;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(pci_dev_dev(adev->pdev), "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(pci_dev_dev(adev->pdev), "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

#ifdef __NetBSD__		/* XXX amdgpu pm */
	__USE(dev);
	__USE(i);
	__USE(ret);
#else
	for (i = 0; i < ACP_DEVS ; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		ret = pm_genpd_remove_device(dev);
		/* If removal fails, don't give up and try the rest */
		if (ret)
			dev_err(dev, "remove dev from genpd failed\n");
	}

	mfd_remove_devices(adev->acp.parent);
#endif
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

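/*
 * When the ACP MFD cells were never created (for instance the board
 * uses the Azalia controller and hw_init left the block power-gated,
 * or on NetBSD where the cells are not hooked up yet), the block is
 * ungated before suspend and gated again on resume; presumably the
 * SMU wants ACP ungated across the suspend/resume cycle.
 */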
static int acp_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power up on suspend */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

static int acp_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power down again on resume */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (adev->powerplay.pp_funcs &&
		adev->powerplay.pp_funcs->set_powergating_by_smu)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);

	return 0;
}

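/*
 * amdgpu IP-block callbacks for ACP.  Several of the hooks are stubs:
 * the block has no meaningful idle, soft-reset or clock-gating
 * handling here, and power gating is simply forwarded to the SMU.
 */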
static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

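/*
 * ACP v2.2 IP block descriptor, added to the device's IP block list by
 * the ASIC setup code for APUs that carry an ACP audio block (e.g. the
 * Stoney parts referenced above).
 */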
const struct amdgpu_ip_block_version acp_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};