/*	$NetBSD: via_irq.c,v 1.7 2020/02/14 04:37:43 riastradh Exp $	*/

/* via_irq.c
 *
 * Copyright 2004 BEAM Ltd.
 * Copyright 2002 Tungsten Graphics, Inc.
 * Copyright 2005 Thomas Hellstrom.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Terry Barnaby <terry1@beam.ltd.uk>
 *    Keith Whitwell <keith@tungstengraphics.com>
 *    Thomas Hellstrom <unichrome@shipmail.org>
 *
 * This code provides standard DRM access to the Via Unichrome / Pro vertical
 * blank interrupt, as well as an infrastructure to handle other interrupts of
 * the chip. The refresh rate is also calculated for video playback sync
 * purposes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: via_irq.c,v 1.7 2020/02/14 04:37:43 riastradh Exp $");

#include <drm/drmP.h>
#include <drm/via_drm.h>
#include "via_drv.h"

#define VIA_REG_INTERRUPT       0x200

/* VIA_REG_INTERRUPT */
#define VIA_IRQ_GLOBAL          (1 << 31)
#define VIA_IRQ_VBLANK_ENABLE   (1 << 19)
#define VIA_IRQ_VBLANK_PENDING  (1 << 3)
#define VIA_IRQ_HQV0_ENABLE     (1 << 11)
#define VIA_IRQ_HQV1_ENABLE     (1 << 25)
#define VIA_IRQ_HQV0_PENDING    (1 << 9)
#define VIA_IRQ_HQV1_PENDING    (1 << 10)
#define VIA_IRQ_DMA0_DD_ENABLE  (1 << 20)
#define VIA_IRQ_DMA0_TD_ENABLE  (1 << 21)
#define VIA_IRQ_DMA1_DD_ENABLE  (1 << 22)
#define VIA_IRQ_DMA1_TD_ENABLE  (1 << 23)
#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)


/*
 * Device-specific IRQs go here. This type might need to be extended with
 * the register if there are multiple IRQ control registers.
 * Currently we activate the HQV interrupts of Unichrome Pro group A.
 */
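
/*
 * Note on the tables below: the five-element layout is inferred from how
 * this file indexes them (via_driver_irq_preinstall() and
 * via_driver_irq_wait()), not from a datasheet:
 *
 *   [0] interrupt enable bit in VIA_REG_INTERRUPT
 *   [1] interrupt pending/acknowledge bit in VIA_REG_INTERRUPT
 *   [2] status register offset to poll for completion (0 if none)
 *   [3] mask applied to that status register
 *   [4] masked value indicating the engine is done
 */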

static maskarray_t via_pro_group_a_irqs[] = {
	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
	 0x00000000},
	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
	 0x00000000},
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};

static maskarray_t via_unichrome_irqs[] = {
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
};
static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};


static unsigned time_diff(struct timeval *now, struct timeval *then)
{
	return (now->tv_usec >= then->tv_usec) ?
		now->tv_usec - then->tv_usec :
		1000000 - (then->tv_usec - now->tv_usec);
}

u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	drm_via_private_t *dev_priv = dev->dev_private;

	if (pipe != 0)
		return 0;

	return atomic_read(&dev_priv->vbl_received);
}

irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	int handled = 0;
	struct timeval cur_vblank;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int i;

	status = VIA_READ(VIA_REG_INTERRUPT);
	if (status & VIA_IRQ_VBLANK_PENDING) {
		atomic_inc(&dev_priv->vbl_received);
		if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
			do_gettimeofday(&cur_vblank);
			if (dev_priv->last_vblank_valid) {
				dev_priv->usec_per_vblank =
					time_diff(&cur_vblank,
						  &dev_priv->last_vblank) >> 4;
			}
			dev_priv->last_vblank = cur_vblank;
			dev_priv->last_vblank_valid = 1;
		}
		if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
			DRM_DEBUG("US per vblank is: %u\n",
				  dev_priv->usec_per_vblank);
		}
		drm_handle_vblank(dev, 0);
		handled = 1;
	}

	for (i = 0; i < dev_priv->num_irqs; ++i) {
		if (status & cur_irq->pending_mask) {
#ifdef __NetBSD__
			spin_lock(&cur_irq->irq_lock);
			cur_irq->irq_received++;
			DRM_SPIN_WAKEUP_ONE(&cur_irq->irq_queue,
			    &cur_irq->irq_lock);
			spin_unlock(&cur_irq->irq_lock);
#else
			atomic_inc(&cur_irq->irq_received);
			wake_up(&cur_irq->irq_queue);
#endif
			handled = 1;
			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
				via_dmablit_handler(dev, 0, 1);
			else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
				via_dmablit_handler(dev, 1, 1);
		}
		cur_irq++;
	}

	/* Acknowledge interrupts */
	VIA_WRITE(VIA_REG_INTERRUPT, status);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
{
	u32 status;

	if (dev_priv) {
		/* Acknowledge interrupts */
		status = VIA_READ(VIA_REG_INTERRUPT);
		VIA_WRITE(VIA_REG_INTERRUPT, status |
			  dev_priv->irq_pending_mask);
	}
}
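
/*
 * The vblank enable/disable paths below also poke 0x83d4/0x83d5, which
 * appear to be the MMIO aliases of the VGA CRTC index/data ports, selecting
 * CRTC register 0x11 and toggling bits 4-5 there.  The exact meaning of
 * those bits on this hardware is undocumented (see the "magic" comments
 * further down); this note is an inference from the access pattern, not
 * from a datasheet.
 */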
int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	u32 status;

	if (pipe != 0) {
		DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
		return -EINVAL;
	}

	status = VIA_READ(VIA_REG_INTERRUPT);
	VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);

	VIA_WRITE8(0x83d4, 0x11);
	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);

	return 0;
}

void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	u32 status;

	status = VIA_READ(VIA_REG_INTERRUPT);
	VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);

	VIA_WRITE8(0x83d4, 0x11);
	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);

	if (pipe != 0)
		DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
}

static int
via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
		    unsigned int *sequence)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	unsigned int cur_irq_sequence;
	drm_via_irq_t *cur_irq;
	int ret = 0;
	maskarray_t *masks;
	int real_irq;

	DRM_DEBUG("\n");

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (irq >= drm_via_irq_num) {
		DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
		return -EINVAL;
	}

	real_irq = dev_priv->irq_map[irq];

	if (real_irq < 0) {
		DRM_ERROR("Video IRQ %d not available on this hardware.\n",
			  irq);
		return -EINVAL;
	}

	masks = dev_priv->irq_masks;
	cur_irq = dev_priv->via_irqs + real_irq;

#ifdef __NetBSD__
	spin_lock(&cur_irq->irq_lock);
	if (masks[real_irq][2] && !force_sequence) {
		DRM_SPIN_WAIT_ON(ret, &cur_irq->irq_queue, &cur_irq->irq_lock,
		    3 * HZ,
		    ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
		     masks[irq][4]));
		cur_irq_sequence = cur_irq->irq_received;
	} else {
		DRM_SPIN_WAIT_ON(ret, &cur_irq->irq_queue, &cur_irq->irq_lock,
		    3 * HZ,
		    (((cur_irq_sequence = cur_irq->irq_received) -
		      *sequence) <= (1 << 23)));
	}
	spin_unlock(&cur_irq->irq_lock);
#else
	if (masks[real_irq][2] && !force_sequence) {
		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
			    ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
			     masks[irq][4]));
		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
	} else {
		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
			    (((cur_irq_sequence =
			       atomic_read(&cur_irq->irq_received)) -
			      *sequence) <= (1 << 23)));
	}
#endif
	*sequence = cur_irq_sequence;
	return ret;
}


/*
 * drm_dma.h hooks
 */
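
/*
 * Rough lifecycle, as implemented below: preinstall picks the per-chipset
 * IRQ tables, initializes the per-IRQ wait queues, masks all managed
 * interrupts off and acknowledges anything already pending; postinstall
 * turns on VIA_IRQ_GLOBAL plus the accumulated enable mask; uninstall
 * masks everything off again and, on NetBSD, also tears down the wait
 * queues and spin locks.
 */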
void via_driver_irq_preinstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	drm_via_irq_t *cur_irq;
	int i;

	DRM_DEBUG("dev_priv: %p\n", dev_priv);
	if (dev_priv) {
		cur_irq = dev_priv->via_irqs;

		dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
		dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;

		if (dev_priv->chipset == VIA_PRO_GROUP_A ||
		    dev_priv->chipset == VIA_DX9_0) {
			dev_priv->irq_masks = via_pro_group_a_irqs;
			dev_priv->num_irqs = via_num_pro_group_a;
			dev_priv->irq_map = via_irqmap_pro_group_a;
		} else {
			dev_priv->irq_masks = via_unichrome_irqs;
			dev_priv->num_irqs = via_num_unichrome;
			dev_priv->irq_map = via_irqmap_unichrome;
		}

		for (i = 0; i < dev_priv->num_irqs; ++i) {
#ifdef __NetBSD__
			spin_lock_init(&cur_irq->irq_lock);
			cur_irq->irq_received = 0;
#else
			atomic_set(&cur_irq->irq_received, 0);
#endif
			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
#ifdef __NetBSD__
			DRM_INIT_WAITQUEUE(&cur_irq->irq_queue, "viairq");
#else
			init_waitqueue_head(&cur_irq->irq_queue);
#endif
			dev_priv->irq_enable_mask |= cur_irq->enable_mask;
			dev_priv->irq_pending_mask |= cur_irq->pending_mask;
			cur_irq++;

			DRM_DEBUG("Initializing IRQ %d\n", i);
		}

		dev_priv->last_vblank_valid = 0;

		/* Clear VSync interrupt regs */
		status = VIA_READ(VIA_REG_INTERRUPT);
		VIA_WRITE(VIA_REG_INTERRUPT, status &
			  ~(dev_priv->irq_enable_mask));

		/* Clear bits if they're already high */
		viadrv_acknowledge_irqs(dev_priv);
	}
}

int via_driver_irq_postinstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;

	DRM_DEBUG("via_driver_irq_postinstall\n");
	if (!dev_priv)
		return -EINVAL;

	status = VIA_READ(VIA_REG_INTERRUPT);
	VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
		  | dev_priv->irq_enable_mask);

	/* Some magic, oh for some data sheets ! */
	VIA_WRITE8(0x83d4, 0x11);
	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);

	return 0;
}

void via_driver_irq_uninstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;

	DRM_DEBUG("\n");
	if (dev_priv) {

		/* Some more magic, oh for some data sheets ! */

		VIA_WRITE8(0x83d4, 0x11);
		VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);

		status = VIA_READ(VIA_REG_INTERRUPT);
		VIA_WRITE(VIA_REG_INTERRUPT, status &
			  ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));

#ifdef __NetBSD__
		{
			int i;

			for (i = 0; i < dev_priv->num_irqs; i++) {
				DRM_DESTROY_WAITQUEUE(&dev_priv->via_irqs[i].irq_queue);
				spin_lock_destroy(&dev_priv->via_irqs[i].irq_lock);
			}
		}
#endif
	}
}
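
/*
 * Handler for the VIA wait-irq ioctl.  A VIA_IRQ_RELATIVE request is
 * converted to an absolute sequence by adding the current count (note the
 * deliberate fall-through into VIA_IRQ_ABSOLUTE); VIA_IRQ_SIGNAL is not
 * implemented; VIA_IRQ_FORCE_SEQUENCE forces waiting on the sequence
 * counter even when a completion status register is available for the IRQ.
 */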
int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_irqwait_t *irqwait = data;
	struct timeval now;
	int ret = 0;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int force_sequence;

	if (irqwait->request.irq >= dev_priv->num_irqs) {
		DRM_ERROR("Trying to wait on unknown irq %d\n",
			  irqwait->request.irq);
		return -EINVAL;
	}

	cur_irq += irqwait->request.irq;

	switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
	case VIA_IRQ_RELATIVE:
#ifdef __NetBSD__
		irqwait->request.sequence += cur_irq->irq_received;
#else
		irqwait->request.sequence +=
			atomic_read(&cur_irq->irq_received);
#endif
		irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
		/* FALLTHROUGH: the request is now absolute */
	case VIA_IRQ_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	if (irqwait->request.type & VIA_IRQ_SIGNAL) {
		DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
		return -EINVAL;
	}

	force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);

	ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
				  &irqwait->request.sequence);
	do_gettimeofday(&now);
	irqwait->reply.tval_sec = now.tv_sec;
	irqwait->reply.tval_usec = now.tv_usec;

	return ret;
}