/*	$NetBSD: amdgpu_acp.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_acp.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");

#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK			0x03
#define ACP_TILE_OFF_MASK			0x02
#define ACP_TILE_ON_RETAIN_REG_MASK		0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK		0x20

#define ACP_TILE_P1_MASK			0x3e
#define ACP_TILE_P2_MASK			0x3d
#define ACP_TILE_DSP0_MASK			0x3b
#define ACP_TILE_DSP1_MASK			0x37

#define ACP_TILE_DSP2_MASK			0x2f

#define ACP_DMA_REGS_END			0x146c0
#define ACP_I2S_PLAY_REGS_START			0x14840
#define ACP_I2S_PLAY_REGS_END			0x148b4
#define ACP_I2S_CAP_REGS_START			0x148b8
#define ACP_I2S_CAP_REGS_END			0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET		0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET		0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET		0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET		0x68
#define ACP_BT_PLAY_REGS_START			0x14970
#define ACP_BT_PLAY_REGS_END			0x14a24
#define ACP_BT_COMP1_REG_OFFSET			0xac
#define ACP_BT_COMP2_REG_OFFSET			0xa8

#define mmACP_PGFSM_RETAIN_REG			0x51c9
#define mmACP_PGFSM_CONFIG_REG			0x51ca
#define mmACP_PGFSM_READ_REG_0			0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO		0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI		0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO		0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI		0x51fb

#define mmACP_CONTROL				0x5131
#define mmACP_STATUS				0x5133
#define mmACP_SOFT_RESET			0x5134
#define ACP_CONTROL__ClkEn_MASK			0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK	0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF

#define ACP_TIMEOUT_LOOP			0x000000FF
#define ACP_DEVS				4
#define ACP_SRC_ID				162

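/* Indices for the ACP power tiles named by the ACP_TILE_*_MASK defines above */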
enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

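/*
 * acp_sw_init: create the CGS device handle used to access ACP registers
 * through the common graphics services (CGS) layer.
 */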
static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

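/*
 * acp_sw_fini: release the CGS device handle created in acp_sw_init.
 */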
static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}

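/* Couples the generic PM domain for the ACP audio block to its amdgpu_device */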
struct acp_pm_domain {
	void *adev;
	struct generic_pm_domain gpd;
};

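/* genpd power_off callback: power-gate the ACP block through the SMU */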
static int acp_poweroff(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
		/* call smu to POWER GATE ACP block
		 * smu will
		 * 1. turn off the acp clock
		 * 2. power off the acp tiles
		 * 3. check and enter ulv state
		 */
		if (adev->powerplay.pp_funcs &&
			adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	}
	return 0;
}

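/* genpd power_on callback: ungate the ACP block through the SMU */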
static int acp_poweron(struct generic_pm_domain *genpd)
{
	struct acp_pm_domain *apd;
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
		/* call smu to UNGATE ACP block
		 * smu will
		 * 1. exit ulv
		 * 2. turn on acp clock
		 * 3. power on acp tiles
		 */
		if (adev->powerplay.pp_funcs &&
			adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	}
	return 0;
}

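/*
 * Look up the platform device that the MFD core instantiated for a given
 * cell; hotplug MFD cells are registered with auto device IDs, so their
 * devices are named "<cell name>.<id>.auto".
 */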
static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test ACP block
 *
 * @adev: amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata = NULL;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			    ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means board uses AZ rather than ACP */
	if (r == -ENODEV) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
		return 0;
	} else if (r) {
		return r;
	}

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;

	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (adev->acp.acp_genpd == NULL)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;

	adev->acp.acp_genpd->adev = adev;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);

	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
							GFP_KERNEL);

	if (adev->acp.acp_cell == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
	if (adev->acp.acp_res == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		r = -ENOMEM;
		goto failure;
	}

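	/*
	 * Platform data for the three DesignWare I2S instances exposed as MFD
	 * cells below: [0] playback, [1] capture, [2] the BT play/capture
	 * port.  Stoney additionally needs the 16-bit index override quirk.
	 */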
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		break;
	}

	i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
	i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
	i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

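	/*
	 * MMIO and IRQ resources handed to the MFD cells: the ACP DMA register
	 * block, the playback and capture I2S register ranges, the BT I2S
	 * range, and the ACP DMA interrupt mapping.
	 */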
	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
	adev->acp.acp_res[3].flags = IORESOURCE_MEM;
	adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
	adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

	adev->acp.acp_res[4].name = "acp2x_dma_irq";
	adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, ACP_SRC_ID);
	adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

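	/*
	 * One MFD cell for the ACP DMA engine plus three "designware-i2s"
	 * cells, each bound to the resources and platform data set up above.
	 */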
	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 5;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[3].name = "designware-i2s";
	adev->acp.acp_cell[3].num_resources = 1;
	adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
	adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
	adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
							ACP_DEVS);
	if (r)
		goto failure;

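	/* Attach every MFD child device to the ACP power domain */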
	for (i = 0; i < ACP_DEVS; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
		if (r) {
			dev_err(dev, "Failed to add dev to genpd\n");
			goto failure;
		}
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to enable ACP clock\n");
			r = -ETIMEDOUT;
			goto failure;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
	return 0;

failure:
	kfree(i2s_pdata);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);
	kfree(adev->acp.acp_genpd);
	return r;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_genpd) {
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
		return 0;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

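	/* Detach the MFD child devices from the ACP power domain before removal */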
	for (i = 0; i < ACP_DEVS; i++) {
		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
		ret = pm_genpd_remove_device(dev);
		/* If removal fails, don't give up and try the rest */
		if (ret)
			dev_err(dev, "remove dev from genpd failed\n");
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_genpd);
	kfree(adev->acp.acp_cell);

	return 0;
}

static int acp_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power up on suspend */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

static int acp_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* power down again on resume */
	if (!adev->acp.acp_cell)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

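/* AMD_PG_STATE_GATE requests power-gating of the ACP block via the SMU */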
static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (adev->powerplay.pp_funcs &&
		adev->powerplay.pp_funcs->set_powergating_by_smu)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);

	return 0;
}

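/* IP block callbacks plugged into the amdgpu IP block framework */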
static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

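/* ACP v2.2 IP block registration record */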
const struct amdgpu_ip_block_version acp_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};