/*	$NetBSD: amdgpu_dm_irq.c,v 1.3 2021/12/19 12:31:45 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_dm_irq.c,v 1.3 2021/12/19 12:31:45 riastradh Exp $");

#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

/**
 * DOC: overview
 *
 * DM provides another layer of IRQ management on top of what the base driver
 * already provides. This is something that could be cleaned up, and is a
 * future TODO item.
 *
 * The base driver provides IRQ source registration with DRM, handler
 * registration into the base driver's IRQ table, and a handler callback
 * amdgpu_irq_handler(), which DRM calls when an interrupt fires. This generic
 * handler looks up the IRQ table, and calls the respective
 * &amdgpu_irq_src_funcs.process hookups.
 *
 * What DM provides on top are two IRQ tables specifically for top-half and
 * bottom-half IRQ handling, with the bottom-half implementing workqueues:
 *
 * - &amdgpu_display_manager.irq_handler_list_high_tab
 * - &amdgpu_display_manager.irq_handler_list_low_tab
 *
 * They override the base driver's IRQ table, and the effect can be seen
 * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They
 * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks
 * up DM's IRQ tables. However, in order for the base driver to recognize this
 * hook, DM still needs to register the IRQ with the base driver. See
 * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
 *
 * To expose DC's hardware interrupt toggle to the base driver, DM implements
 * &amdgpu_irq_src_funcs.set hooks. The base driver calls them through
 * amdgpu_irq_update() to enable or disable the interrupt.
 */
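
/*
 * Illustrative sketch only (not compiled): the registration flow described
 * above, loosely modeled on what dce110_register_irq_handlers() does. The
 * handler name handle_hpd_irq and its argument are placeholders for this
 * example.
 *
 *	struct dc_interrupt_params int_params = {
 *		.int_context = INTERRUPT_LOW_IRQ_CONTEXT,
 *		.irq_source = DC_IRQ_SOURCE_HPD1,
 *	};
 *
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *					 handle_hpd_irq, aconnector);
 *
 * When the interrupt fires, DRM calls amdgpu_irq_handler(), which dispatches
 * to DM's amdgpu_dm_irq_handler(); that in turn runs the high context
 * handlers inline and queues the low context handlers (such as this one) on
 * a workqueue.
 */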

/******************************************************************************
 * Private declarations.
 *****************************************************************************/

/**
 * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
 *
 * @list: Linked list entry referencing the next/previous handler
 * @handler: Handler function
 * @handler_arg: Argument passed to the handler when triggered
 * @dm: DM which this handler belongs to
 * @irq_source: DC interrupt source that this handler is registered for
 */
struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	struct amdgpu_display_manager *dm;
	/* DAL irq source which registered for this interrupt. */
	enum dc_irq_source irq_source;
};

#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
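
/*
 * Note: these helpers take the table lock with spin_lock_irqsave() because
 * the handler tables are touched both from process context (registration,
 * init/fini, suspend/resume) and from amdgpu_dm_irq_handler() in interrupt
 * context.
 */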

/******************************************************************************
 * Private functions.
 *****************************************************************************/

static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
{
	hcd->handler = ih;
	hcd->handler_arg = args;
	hcd->dm = dm;
}

/**
 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct
 */
static void dm_irq_work_func(struct work_struct *work)
{
	struct irq_list_head *irq_list_head =
		container_of(work, struct irq_list_head, work);
	struct list_head *handler_list = &irq_list_head->head;
	struct amdgpu_dm_irq_handler_data *handler_data;

	list_for_each_entry(handler_data, handler_list, list) {
		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
			      handler_data->irq_source);

		handler_data->handler(handler_data->handler_arg);
	}

	/* Call a DAL subcomponent which registered for interrupt notification
	 * at INTERRUPT_LOW_IRQ_CONTEXT.
	 * (The most common use is HPD interrupt) */
}

/*
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed.
 */
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
					    void *ih,
					    const struct dc_interrupt_params *int_params)
{
	struct list_head *hnd_list;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	bool handler_removed = false;
	enum dc_irq_source irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	irq_source = int_params->irq_source;

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_for_each_safe(entry, tmp, hnd_list) {

		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
				     list);

		if (ih == handler) {
			/* Found our handler. Remove it from the list. */
			list_del(&handler->list);
			handler_removed = true;
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (handler_removed == false) {
		/* Not necessarily an error - caller may not
		 * know the context. */
		return NULL;
	}

	kfree(handler);

	DRM_DEBUG_KMS(
		"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
		ih, int_params->irq_source, int_params->int_context);

	return hnd_list;
}

static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
				 void (*ih)(void *))
{
	if (NULL == int_params || NULL == ih) {
		DRM_ERROR("DM_IRQ: invalid input!\n");
		return false;
	}

	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
			  int_params->int_context);
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
			  int_params->irq_source);
		return false;
	}

	return true;
}

static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
					       irq_handler_idx handler_idx)
{
	if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
		DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
		return false;
	}

	return true;
}

/******************************************************************************
 * Public functions.
 *
 * Note: caller is responsible for input validation.
 *****************************************************************************/

/**
 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
 * @adev: The base driver device containing the DM device.
 * @int_params: Interrupt parameters containing the source, and handler context
 * @ih: Function pointer to the interrupt handler to register
 * @handler_args: Arguments passed to the handler when the interrupt occurs
 *
 * Register an interrupt handler for the given IRQ source, under the given
 * context. The context can either be high or low. High context handlers are
 * executed directly within ISR context, while low context handlers are
 * executed within a workqueue, thereby allowing operations that sleep.
 *
 * Registered handlers are called in FIFO order, i.e. the earliest registered
 * handler is called first.
 *
 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
 * source, handler function, and args
 */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
				       struct dc_interrupt_params *int_params,
				       void (*ih)(void *),
				       void *handler_args)
{
	struct list_head *hnd_list;
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;
	enum dc_irq_source irq_source;

	if (false == validate_irq_registration_params(int_params, ih))
		return DAL_INVALID_IRQ_HANDLER_IDX;

	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
	if (!handler_data) {
		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
		return DAL_INVALID_IRQ_HANDLER_IDX;
	}

	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;

	handler_data->irq_source = irq_source;

	/* Lock the list, add the handler. */
	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	/* This pointer will be stored by code which requested interrupt
	 * registration.
	 * The same pointer will be needed in order to unregister the
	 * interrupt. */

	DRM_DEBUG_KMS(
		"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
		handler_data,
		irq_source,
		int_params->int_context);

	return handler_data;
}
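
/*
 * Usage sketch only (not compiled): the pointer returned above is the token
 * the caller must keep and later hand back to
 * amdgpu_dm_irq_unregister_interrupt(). The handler name my_handler and its
 * argument arg are placeholders for this example.
 *
 *	void *token = amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *						       my_handler, arg);
 *	if (token == DAL_INVALID_IRQ_HANDLER_IDX)
 *		return -ENOMEM;
 *	...
 *	amdgpu_dm_irq_unregister_interrupt(adev, int_params.irq_source, token);
 */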

/**
 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
 * @adev: The base driver device containing the DM device
 * @irq_source: IRQ source to remove the given handler from
 * @ih: Handler pointer, as returned by amdgpu_dm_irq_register_interrupt(),
 *      identifying the handler to unregister
 *
 * Go through both low and high context IRQ tables, and find the given handler
 * for the given irq source. If found, remove it. Otherwise, do nothing.
 */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
					enum dc_irq_source irq_source,
					void *ih)
{
	struct list_head *handler_list;
	struct dc_interrupt_params int_params;
	int i;

	if (false == validate_irq_unregistration_params(irq_source, ih))
		return;

	memset(&int_params, 0, sizeof(int_params));

	int_params.irq_source = irq_source;

	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {

		int_params.int_context = i;

		handler_list = remove_irq_handler(adev, ih, &int_params);

		if (handler_list != NULL)
			break;
	}

	if (handler_list == NULL) {
		/* If we got here, it means we searched all irq contexts
		 * for this irq source, but the handler was not found. */
		DRM_ERROR(
			"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
			ih, irq_source);
	}
}

/**
 * amdgpu_dm_irq_init() - Initialize DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Initialize DM's high and low context IRQ tables.
 *
 * The N by M table contains N IRQ sources, with M
 * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The
 * list_heads are initialized here. When an interrupt n is triggered, all m
 * handlers are called in sequence, FIFO according to registration order.
 *
 * The low context table requires special steps to initialize, since handlers
 * will be deferred to a workqueue. See &struct irq_list_head.
 */
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;

	DRM_DEBUG_KMS("DM_IRQ\n");

	spin_lock_init(&adev->dm.irq_handler_list_table_lock);

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		/* low context handler list init */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		INIT_LIST_HEAD(&lh->head);
		INIT_WORK(&lh->work, dm_irq_work_func);

		/* high context handler init */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}

	return 0;
}

/**
 * amdgpu_dm_irq_fini() - Tear down DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Flush all work within the low context IRQ table.
 */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;
	unsigned long irq_table_flags;

	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
		/* The handler was removed from the table,
		 * it means it is safe to flush all the 'work'
		 * (because no code can schedule a new one). */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&lh->work);
	}

	spin_lock_destroy(&adev->dm.irq_handler_list_table_lock);
}

int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/*
	 * Disable HW interrupts for HPD and HPDRX only, since FLIP and VBLANK
	 * will be disabled by manage_dm_interrupts when the CRTC is disabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);

		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: early resume\n");

	/* Re-enable the HW short-pulse (HPD RX) interrupts. */
	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	return 0;
}

int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: resume\n");

	/*
	 * Re-enable HW interrupts for HPD only, since FLIP and VBLANK
	 * will be enabled by manage_dm_interrupts when the CRTC is enabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

/*
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	unsigned long irq_table_flags;
	struct work_struct *work = NULL;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

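	/*
	 * Note: schedule_work() returns false when the work item was already
	 * queued, so the message below can also fire when an interrupt
	 * arrives before the previous bottom half has run, not only on a
	 * genuine failure.
	 */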
	if (work) {
		if (!schedule_work(work))
			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
				 irq_source);
	}
}

/*
 * amdgpu_dm_irq_immediate_work
 * Call the high irq handlers immediately instead of sending them to the
 * work queue.
 */
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
					 enum dc_irq_source irq_source)
{
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	list_for_each_entry(handler_data,
			    &adev->dm.irq_handler_list_high_tab[irq_source],
			    list) {
		/* Call a subcomponent which registered for immediate
		 * interrupt notification */
		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

/**
 * amdgpu_dm_irq_handler - Generic DM IRQ handler
 * @adev: amdgpu base driver device containing the DM device
 * @source: Unused
 * @entry: Data about the triggered interrupt
 *
 * Calls all registered high irq work immediately, and schedules work for low
 * irq. The DM IRQ table is used to find the corresponding handlers.
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(
			adev->dm.dc,
			entry->src_id,
			entry->src_data[0]);

	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high irq work immediately */
	amdgpu_dm_irq_immediate_work(adev, src);

	/* Schedule low irq work */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}

static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
{
	switch (type) {
	case AMDGPU_HPD_1:
		return DC_IRQ_SOURCE_HPD1;
	case AMDGPU_HPD_2:
		return DC_IRQ_SOURCE_HPD2;
	case AMDGPU_HPD_3:
		return DC_IRQ_SOURCE_HPD3;
	case AMDGPU_HPD_4:
		return DC_IRQ_SOURCE_HPD4;
	case AMDGPU_HPD_5:
		return DC_IRQ_SOURCE_HPD5;
	case AMDGPU_HPD_6:
		return DC_IRQ_SOURCE_HPD6;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}

static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, src, st);
	return 0;
}

static inline int dm_irq_state(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *source,
			       unsigned crtc_id,
			       enum amdgpu_interrupt_state state,
			       const enum irq_type dal_irq_type,
			       const char *func)
{
	bool st;
	enum dc_irq_source irq_source;

	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR(
			"%s: crtc is NULL at id :%d\n",
			func,
			crtc_id);
		return 0;
	}

	if (acrtc->otg_inst == -1)
		return 0;

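	/*
	 * DC enumerates the per-OTG variants of each interrupt source
	 * contiguously (e.g. DC_IRQ_SOURCE_VBLANK1..VBLANK6), so the source
	 * for this CRTC is the base type offset by its OTG instance.
	 */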
	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned crtc_id,
					 enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_PFLIP,
		__func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned crtc_id,
					enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VBLANK,
		__func__);
}

static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int crtc_id,
					   enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VUPDATE,
		__func__);
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
	.set = amdgpu_dm_set_vupdate_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};

void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}

/**
 * amdgpu_dm_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);

		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd,
					 true);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 true);
		}
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 false);
		}
	}
	drm_connector_list_iter_end(&iter);
}