/* $NetBSD: amdgpu_jpeg_v2_5.c,v 1.3 2021/12/19 12:02:39 riastradh Exp $ */

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_jpeg_v2_5.c,v 1.3 2021/12/19 12:02:39 riastradh Exp $");

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET	0x401f

#define JPEG25_MAX_HW_INSTANCES_ARCTURUS	2

static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v2_5_set_powergating_state(void *handle,
		enum amd_powergating_state state);

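/* IH client IDs indexed by JPEG instance; Arcturus exposes two instances. */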
static int amdgpu_ih_clientid_jpeg[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * jpeg_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int jpeg_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS) {
		u32 harvest;
		int i;

		adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->jpeg.harvest_config |= 1 << i;
		}

		/* fail probe if every instance has been harvested away */
		if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
						  AMDGPU_JPEG_HARVEST_JPEG1))
			return -ENOENT;
	} else
		adev->jpeg.num_jpeg_inst = 1;

	jpeg_v2_5_set_dec_ring_funcs(adev);
	jpeg_v2_5_set_irq_funcs(adev);

	return 0;
}

/**
 * jpeg_v2_5_sw_init - sw init for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and do software initialization
 */
static int jpeg_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		/* JPEG TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
			VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq);
		if (r)
			return r;
	}

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = &adev->jpeg.inst[i].ring_dec;
		ring->use_doorbell = true;
		/* vcn_ring0_1 is in 64-bit doorbell units (hence the shift);
		 * JPEG takes slot 1 of instance i's 8-slot doorbell window. */
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
		snprintf(ring->name, sizeof(ring->name), "jpeg_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0);
		if (r)
			return r;

		adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
		adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
	}

	return 0;
}

/**
 * jpeg_v2_5_sw_fini - sw fini for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend the JPEG block and free up software allocations
 */
static int jpeg_v2_5_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v2_5_hw_init - start and test JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Set up the doorbell range and run a ring test on each enabled instance
 */
static int jpeg_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = &adev->jpeg.inst[i].ring_dec;
		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	DRM_INFO("JPEG decode initialized successfully.\n");

	return 0;
}

/**
 * jpeg_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the JPEG block, mark ring as not ready anymore
 */
static int jpeg_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = &adev->jpeg.inst[i].ring_dec;
		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
		    RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
			jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

		ring->sched.ready = false;
	}

	return 0;
}

/**
 * jpeg_v2_5_suspend - suspend JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v2_5_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v2_5_resume - resume JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v2_5_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v2_5_hw_init(adev);

	return r;
}

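/**
 * jpeg_v2_5_disable_clock_gating - disable JPEG clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: JPEG instance index
 *
 * Select dynamic clocking when MGCG is supported and open the clock
 * gates for the decoder, encoder, JMCIF and JRBBM sub-blocks.
 */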
static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~(1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT);

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);

	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL);
	data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
		| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
		| JPEG_CGC_CTRL__JMCIF_MODE_MASK
		| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_CTRL, data);
}

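/**
 * jpeg_v2_5_enable_clock_gating - enable JPEG clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: JPEG instance index
 *
 * Close the clock gates for the decoder, encoder, JMCIF and JRBBM
 * sub-blocks.
 */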
static void jpeg_v2_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
}

/**
 * jpeg_v2_5_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = &adev->jpeg.inst[i].ring_dec;
		/* disable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

		/* JPEG disable CGC */
		jpeg_v2_5_disable_clock_gating(adev, i);

		/* MJPEG global tiling registers */
		WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(JPEG, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable JMI channel */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL), 0,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		/* enable System Interrupt for JRBC */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmJPEG_SYS_INT_EN),
			JPEG_SYS_INT_EN__DJRBC_MASK,
			~JPEG_SYS_INT_EN__DJRBC_MASK);

		/* program the ring buffer: hold fetch while the base address,
		 * pointers and size are (re)written, then release it */
		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_VMID, 0);
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(JPEG, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_RPTR, 0);
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR, 0);
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
		WREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
		ring->wptr = RREG32_SOC15(JPEG, i, mmUVD_JRBC_RB_WPTR);
	}

	return 0;
}

/**
 * jpeg_v2_5_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the JPEG block
 */
static int jpeg_v2_5_stop(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JMI_CNTL),
			UVD_JMI_CNTL__SOFT_RESET_MASK,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		jpeg_v2_5_enable_clock_gating(adev, i);

		/* enable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS),
			UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
	}

	return 0;
}

/**
 * jpeg_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_RPTR);
}

/**
 * jpeg_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

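/**
 * jpeg_v2_5_is_idle - check JPEG block idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true when every unharvested instance reports RB_JOB_DONE.
 */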
static bool jpeg_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ret &= (((RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS) &
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
	}

	return ret;
}

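/**
 * jpeg_v2_5_wait_for_idle - wait for the JPEG block to become idle
 *
 * @handle: amdgpu_device pointer
 *
 * Polls UVD_JRBC_STATUS on each unharvested instance until RB_JOB_DONE
 * is set, returning the error from the register wait on timeout.
 */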
static int jpeg_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		SOC15_WAIT_ON_RREG(JPEG, i, mmUVD_JRBC_STATUS,
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK, ret);
		if (ret)
			return ret;
	}

	return ret;
}

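/**
 * jpeg_v2_5_set_clockgating_state - set JPEG clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state to program
 *
 * Clock gating is only engaged while the block is idle.
 */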
static int jpeg_v2_5_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (!jpeg_v2_5_is_idle(handle))
				return -EBUSY;
			jpeg_v2_5_enable_clock_gating(adev, i);
		} else {
			jpeg_v2_5_disable_clock_gating(adev, i);
		}
	}

	return 0;
}

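/**
 * jpeg_v2_5_set_powergating_state - set JPEG power gating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state to program
 *
 * Stops the block to gate it, starts it to ungate it, and caches the
 * new state on success.
 */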
static int jpeg_v2_5_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v2_5_stop(adev);
	else
		ret = jpeg_v2_5_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

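/*
 * Nothing to program per interrupt source: the JRBC system interrupt is
 * enabled unconditionally in jpeg_v2_5_start().
 */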
static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

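/**
 * jpeg_v2_5_process_interrupt - process a JPEG trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source the entry was raised on
 * @entry: interrupt vector entry from the IH ring
 *
 * Maps the IH client id back to a JPEG instance and signals completed
 * fences on its decode ring.
 */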
static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

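/* IP-block entry points dispatched by the amdgpu IP framework. */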
static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
	.name = "jpeg_v2_5",
	.early_init = jpeg_v2_5_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v2_5_sw_init,
	.sw_fini = jpeg_v2_5_sw_fini,
	.hw_init = jpeg_v2_5_hw_init,
	.hw_fini = jpeg_v2_5_hw_fini,
	.suspend = jpeg_v2_5_suspend,
	.resume = jpeg_v2_5_resume,
	.is_idle = jpeg_v2_5_is_idle,
	.wait_for_idle = jpeg_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
	.set_powergating_state = jpeg_v2_5_set_powergating_state,
};

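/* Decode ring callbacks; packet emission is shared with JPEG v2.0. */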
static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
	.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
	.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

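/**
 * jpeg_v2_5_set_dec_ring_funcs - install decode ring callbacks
 *
 * @adev: amdgpu_device pointer
 *
 * Points each unharvested instance's decode ring at the shared function
 * table and records the instance number in ring->me.
 */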
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
		adev->jpeg.inst[i].ring_dec.me = i;
		DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
	}
}

static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
	.set = jpeg_v2_5_set_interrupt_state,
	.process = jpeg_v2_5_process_interrupt,
};

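/**
 * jpeg_v2_5_set_irq_funcs - install interrupt callbacks
 *
 * @adev: amdgpu_device pointer
 *
 * Registers one interrupt type per unharvested instance.
 */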
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		adev->jpeg.inst[i].irq.num_types = 1;
		adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
	}
}

const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &jpeg_v2_5_ip_funcs,
};