1 /* $NetBSD: via_irq.c,v 1.9 2021/12/19 12:30:23 riastradh Exp $ */ 2 3 /* via_irq.c 4 * 5 * Copyright 2004 BEAM Ltd. 6 * Copyright 2002 Tungsten Graphics, Inc. 7 * Copyright 2005 Thomas Hellstrom. 8 * All Rights Reserved. 9 * 10 * Permission is hereby granted, free of charge, to any person obtaining a 11 * copy of this software and associated documentation files (the "Software"), 12 * to deal in the Software without restriction, including without limitation 13 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 14 * and/or sell copies of the Software, and to permit persons to whom the 15 * Software is furnished to do so, subject to the following conditions: 16 * 17 * The above copyright notice and this permission notice (including the next 18 * paragraph) shall be included in all copies or substantial portions of the 19 * Software. 20 * 21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 24 * BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 25 * DAMAGES OR 26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 28 * DEALINGS IN THE SOFTWARE. 29 * 30 * Authors: 31 * Terry Barnaby <terry1 (at) beam.ltd.uk> 32 * Keith Whitwell <keith (at) tungstengraphics.com> 33 * Thomas Hellstrom <unichrome (at) shipmail.org> 34 * 35 * This code provides standard DRM access to the Via Unichrome / Pro Vertical blank 36 * interrupt, as well as an infrastructure to handle other interrupts of the chip. 37 * The refresh rate is also calculated for video playback sync purposes. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: via_irq.c,v 1.9 2021/12/19 12:30:23 riastradh Exp $");

#include <drm/drm_device.h>
#include <drm/drm_vblank.h>
#include <drm/via_drm.h>

#include "via_drv.h"

/* MMIO offset of the chip's single interrupt control/status register. */
#define VIA_REG_INTERRUPT       0x200

/*
 * Bit layout of VIA_REG_INTERRUPT.  The *_ENABLE bits arm an interrupt
 * source; the matching *_PENDING bits latch when it fires and are
 * cleared by writing them back as 1 (write-one-to-clear — see the
 * "Acknowledge interrupts" writes below).
 */
#define VIA_IRQ_GLOBAL          (1 << 31)
#define VIA_IRQ_VBLANK_ENABLE   (1 << 19)
#define VIA_IRQ_VBLANK_PENDING  (1 << 3)
#define VIA_IRQ_HQV0_ENABLE     (1 << 11)
#define VIA_IRQ_HQV1_ENABLE     (1 << 25)
#define VIA_IRQ_HQV0_PENDING    (1 << 9)
#define VIA_IRQ_HQV1_PENDING    (1 << 10)
#define VIA_IRQ_DMA0_DD_ENABLE  (1 << 20)
#define VIA_IRQ_DMA0_TD_ENABLE  (1 << 21)
#define VIA_IRQ_DMA1_DD_ENABLE  (1 << 22)
#define VIA_IRQ_DMA1_TD_ENABLE  (1 << 23)
#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)


/*
 * Device-specific IRQs go here. This type might need to be extended with
 * the register if there are multiple IRQ control registers.
 * Currently we activate the HQV interrupts of Unichrome Pro group A.
 *
 * Each maskarray_t row is: { enable bit, pending bit, status register
 * offset, status mask, expected value } — the last three describe an
 * optional secondary status register polled by via_driver_irq_wait().
 * A zero register offset means "no secondary register for this IRQ".
 */

static maskarray_t via_pro_group_a_irqs[] = {
	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
	 0x00000000 },
	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
	 0x00000000 },
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
/*
 * Maps the drm_via_irq_* enum (index) to a row of the maskarray above,
 * or -1 when that logical IRQ does not exist on this chip family.
 */
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};

static maskarray_t via_unichrome_irqs[] = {
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
};
static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};


/*
 * DRM .get_vblank_counter hook: return the number of vertical blanks
 * counted by the interrupt handler.  Only CRTC/pipe 0 is supported;
 * any other pipe reads as 0.
 */
u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	drm_via_private_t *dev_priv = dev->dev_private;

	if (pipe != 0)
		return 0;

	return atomic_read(&dev_priv->vbl_received);
}

/*
 * Primary interrupt handler.  Reads the pending bits once, services the
 * vblank interrupt (counting it and, every 16th vblank, re-estimating
 * the vblank period for playback sync), then walks the per-chipset IRQ
 * table waking any waiters and dispatching DMA-blit completions.
 * Finally writes the latched status back to acknowledge everything
 * that was pending.
 */
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	int handled = 0;
	ktime_t cur_vblank;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int i;

	status = via_read(dev_priv, VIA_REG_INTERRUPT);
	if (status & VIA_IRQ_VBLANK_PENDING) {
		atomic_inc(&dev_priv->vbl_received);
		/* Every 16 vblanks, timestamp and update the period. */
		if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
			cur_vblank = ktime_get();
			if (dev_priv->last_vblank_valid) {
				/* >> 4 averages over the 16-vblank window. */
				dev_priv->nsec_per_vblank =
					ktime_sub(cur_vblank,
					dev_priv->last_vblank) >> 4;
			}
			dev_priv->last_vblank = cur_vblank;
			dev_priv->last_vblank_valid = 1;
		}
		/* Debug trace only once every 256 vblanks. */
		if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
			DRM_DEBUG("nsec per vblank is: %"PRIu64"\n",
				  ktime_to_ns(dev_priv->nsec_per_vblank));
		}
		drm_handle_vblank(dev, 0);
		handled = 1;
	}

	for (i = 0; i < dev_priv->num_irqs; ++i) {
		if (status & cur_irq->pending_mask) {
#ifdef __NetBSD__
			/*
			 * NetBSD: irq_received is a plain counter guarded
			 * by irq_lock; waiters sleep on irq_queue under
			 * the same lock (see via_driver_irq_wait()).
			 */
			spin_lock(&cur_irq->irq_lock);
			cur_irq->irq_received++;
			DRM_SPIN_WAKEUP_ONE(&cur_irq->irq_queue,
			    &cur_irq->irq_lock);
			spin_unlock(&cur_irq->irq_lock);
#else
			atomic_inc(&cur_irq->irq_received);
			wake_up(&cur_irq->irq_queue);
#endif
			handled = 1;
			/* Transfer-done IRQs also drive the blitter state machine. */
			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
				via_dmablit_handler(dev, 0, 1);
			else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
				via_dmablit_handler(dev, 1, 1);
		}
		cur_irq++;
	}

	/* Acknowledge interrupts (write-back of latched pending bits) */
	via_write(dev_priv, VIA_REG_INTERRUPT, status);


	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

/*
 * Clear any interrupt bits that are already latched by writing the
 * current status ORed with all pending masks back to the register.
 * Safe to call with a NULL dev_priv (no-op).
 */
static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
{
	u32 status;

	if (dev_priv) {
		/* Acknowledge interrupts */
		status = via_read(dev_priv, VIA_REG_INTERRUPT);
		via_write(dev_priv, VIA_REG_INTERRUPT, status |
			  dev_priv->irq_pending_mask);
	}
}

/*
 * DRM .enable_vblank hook for pipe 0.  Sets the vblank enable bit in
 * the interrupt register and pokes the (legacy VGA) CRTC index/data
 * port pair 0x83d4/0x83d5.
 * NOTE(review): 0x83d4/0x83d5 look like the memory-mapped VGA CRT
 * controller index/data ports and 0x11 like CR11 (vertical retrace
 * end), whose high bits gate the retrace interrupt — presumably that
 * is what the 0x30 mask toggles; no data sheet available to confirm.
 *
 * Returns 0 on success, -EINVAL for any pipe other than 0.
 */
int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	u32 status;

	if (pipe != 0) {
		DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
		return -EINVAL;
	}

	status = via_read(dev_priv, VIA_REG_INTERRUPT);
	via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);

	via_write8(dev_priv, 0x83d4, 0x11);
	via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);

	return 0;
}

/*
 * DRM .disable_vblank hook: inverse of via_enable_vblank().  The
 * disable is performed unconditionally; a bad pipe is only reported
 * afterwards (hook returns void, so there is no error to propagate).
 */
void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	u32 status;

	status = via_read(dev_priv, VIA_REG_INTERRUPT);
	via_write(dev_priv, VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);

	via_write8(dev_priv, 0x83d4, 0x11);
	via_write8_mask(dev_priv, 0x83d5, 0x30, 0);

	if (pipe != 0)
		DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
}

/*
 * Wait (up to 3 seconds) for a device IRQ.
 *
 * irq is a logical drm_via_irq_* index which is translated through
 * irq_map to a row ("real_irq") of the chipset's maskarray.  When the
 * IRQ has a secondary status register (masks[...][2] != 0) and the
 * caller did not force sequence semantics, completion is detected by
 * polling that register for the expected value; otherwise the wait is
 * for cur_irq->irq_received to advance past *sequence (modulo 2^24
 * wrap tolerance via the (1 << 23) window).
 *
 * NOTE(review): the if() tests masks[real_irq][2] but the wait
 * condition indexes masks[irq][2..4].  The two indices only agree on
 * Pro group A where irq_map is partly the identity; this mix is
 * inherited from the upstream driver — confirm before changing.
 *
 * On return *sequence holds the IRQ count observed.  Returns 0 on
 * success, -EINVAL on bad arguments, or the wait primitive's error
 * (e.g. timeout).
 */
static int
via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
		    unsigned int *sequence)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	unsigned int cur_irq_sequence;
	drm_via_irq_t *cur_irq;
	int ret = 0;
	maskarray_t *masks;
	int real_irq;

	DRM_DEBUG("\n");

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (irq >= drm_via_irq_num) {
		DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
		return -EINVAL;
	}

	real_irq = dev_priv->irq_map[irq];

	if (real_irq < 0) {
		DRM_ERROR("Video IRQ %d not available on this hardware.\n",
			  irq);
		return -EINVAL;
	}

	masks = dev_priv->irq_masks;
	cur_irq = dev_priv->via_irqs + real_irq;

#ifdef __NetBSD__
	/* Sleep on the per-IRQ queue under its spin lock. */
	spin_lock(&cur_irq->irq_lock);
	if (masks[real_irq][2] && !force_sequence) {
		DRM_SPIN_WAIT_ON(ret, &cur_irq->irq_queue, &cur_irq->irq_lock,
		    3 * HZ,
		    ((via_read(dev_priv, masks[irq][2]) & masks[irq][3]) ==
			masks[irq][4]));
		cur_irq_sequence = cur_irq->irq_received;
	} else {
		DRM_SPIN_WAIT_ON(ret, &cur_irq->irq_queue, &cur_irq->irq_lock,
		    3 * HZ,
		    (((cur_irq_sequence = cur_irq->irq_received) -
			*sequence) <= (1 << 23)));
	}
	spin_unlock(&cur_irq->irq_lock);
#else
	if (masks[real_irq][2] && !force_sequence) {
		VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
			    ((via_read(dev_priv, masks[irq][2]) & masks[irq][3]) ==
			     masks[irq][4]));
		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
	} else {
		VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
			    (((cur_irq_sequence =
			       atomic_read(&cur_irq->irq_received)) -
			      *sequence) <= (1 << 23)));
	}
#endif
	*sequence = cur_irq_sequence;
	return ret;
}


/*
 * drm_dma.h hooks
 */

/*
 * Called before the interrupt handler is installed.  Selects the
 * per-chipset IRQ table (Pro group A / DX9 vs. plain Unichrome),
 * initializes each drm_via_irq_t (counter, masks, wait queue and — on
 * NetBSD — its spin lock), accumulates the combined enable/pending
 * masks, disables all managed interrupt sources, and acknowledges
 * anything already latched.
 */
void via_driver_irq_preinstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	drm_via_irq_t *cur_irq;
	int i;

	DRM_DEBUG("dev_priv: %p\n", dev_priv);
	if (dev_priv) {
		cur_irq = dev_priv->via_irqs;

		dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
		dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;

		if (dev_priv->chipset == VIA_PRO_GROUP_A ||
		    dev_priv->chipset == VIA_DX9_0) {
			dev_priv->irq_masks = via_pro_group_a_irqs;
			dev_priv->num_irqs = via_num_pro_group_a;
			dev_priv->irq_map = via_irqmap_pro_group_a;
		} else {
			dev_priv->irq_masks = via_unichrome_irqs;
			dev_priv->num_irqs = via_num_unichrome;
			dev_priv->irq_map = via_irqmap_unichrome;
		}

		for (i = 0; i < dev_priv->num_irqs; ++i) {
#ifdef __NetBSD__
			spin_lock_init(&cur_irq->irq_lock);
			cur_irq->irq_received = 0;
#else
			atomic_set(&cur_irq->irq_received, 0);
#endif
			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
#ifdef __NetBSD__
			DRM_INIT_WAITQUEUE(&cur_irq->irq_queue, "viairq");
#else
			init_waitqueue_head(&cur_irq->irq_queue);
#endif
			dev_priv->irq_enable_mask |= cur_irq->enable_mask;
			dev_priv->irq_pending_mask |= cur_irq->pending_mask;
			cur_irq++;

			DRM_DEBUG("Initializing IRQ %d\n", i);
		}

		dev_priv->last_vblank_valid = 0;

		/* Clear VSync interrupt regs */
		status = via_read(dev_priv, VIA_REG_INTERRUPT);
		via_write(dev_priv, VIA_REG_INTERRUPT, status &
			  ~(dev_priv->irq_enable_mask));

		/* Clear bits if they're already high */
		viadrv_acknowledge_irqs(dev_priv);
	}
}

/*
 * Called after the interrupt handler is installed.  Enables the global
 * interrupt bit plus every source accumulated by preinstall, then
 * performs the same CRTC-register poke as via_enable_vblank().
 * Returns 0 on success, -EINVAL when the device is uninitialized.
 */
int
via_driver_irq_postinstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;

	DRM_DEBUG("via_driver_irq_postinstall\n");
	if (!dev_priv)
		return -EINVAL;

	status = via_read(dev_priv, VIA_REG_INTERRUPT);
	via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
		  | dev_priv->irq_enable_mask);

	/* Some magic, oh for some data sheets ! */
	via_write8(dev_priv, 0x83d4, 0x11);
	via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);

	return 0;
}

/*
 * Teardown counterpart of the install hooks: undoes the CRTC poke,
 * masks off the vblank and all table-managed interrupt sources, and
 * (NetBSD only) destroys the per-IRQ wait queues and spin locks
 * created in preinstall.
 */
void via_driver_irq_uninstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;

	DRM_DEBUG("\n");
	if (dev_priv) {

		/* Some more magic, oh for some data sheets ! */

		via_write8(dev_priv, 0x83d4, 0x11);
		via_write8_mask(dev_priv, 0x83d5, 0x30, 0);

		status = via_read(dev_priv, VIA_REG_INTERRUPT);
		via_write(dev_priv, VIA_REG_INTERRUPT, status &
			  ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));

#ifdef __NetBSD__
		{
			int i;

			for (i = 0; i < dev_priv->num_irqs; i++) {
				DRM_DESTROY_WAITQUEUE(&dev_priv->via_irqs[i].irq_queue);
				spin_lock_destroy(&dev_priv->via_irqs[i].irq_lock);
			}
		}
#endif
	}
}

/*
 * DRM_IOCTL_VIA_WAIT_IRQ handler.  Validates the requested IRQ index,
 * converts a RELATIVE request into an ABSOLUTE target sequence by
 * adding the current received count, rejects the unimplemented SIGNAL
 * mode, then delegates to via_driver_irq_wait() and fills the reply
 * timestamp from the monotonic clock.
 */
int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_irqwait_t *irqwait = data;
	struct timespec64 now;
	int ret = 0;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int force_sequence;

	if (irqwait->request.irq >= dev_priv->num_irqs) {
		DRM_ERROR("Trying to wait on unknown irq %d\n",
			  irqwait->request.irq);
		return -EINVAL;
	}

	cur_irq += irqwait->request.irq;

	switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
	case VIA_IRQ_RELATIVE:
#ifdef __NetBSD__
		irqwait->request.sequence +=
			cur_irq->irq_received;
#else
		irqwait->request.sequence +=
			atomic_read(&cur_irq->irq_received);
#endif
		irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
		/* FALLTHROUGH: a rebased relative wait becomes absolute. */
	case VIA_IRQ_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	if (irqwait->request.type & VIA_IRQ_SIGNAL) {
		DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
		return -EINVAL;
	}

	force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);

	ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
				  &irqwait->request.sequence);
	ktime_get_ts64(&now);
	irqwait->reply.tval_sec = now.tv_sec;
	irqwait->reply.tval_usec = now.tv_nsec / NSEC_PER_USEC;

	return ret;
}