Home | History | Annotate | Line # | Download | only in via
via_irq.c revision 1.2.2.1
      1 /* via_irq.c
      2  *
      3  * Copyright 2004 BEAM Ltd.
      4  * Copyright 2002 Tungsten Graphics, Inc.
      5  * Copyright 2005 Thomas Hellstrom.
      6  * All Rights Reserved.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the "Software"),
     10  * to deal in the Software without restriction, including without limitation
     11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     12  * and/or sell copies of the Software, and to permit persons to whom the
     13  * Software is furnished to do so, subject to the following conditions:
     14  *
     15  * The above copyright notice and this permission notice (including the next
     16  * paragraph) shall be included in all copies or substantial portions of the
     17  * Software.
     18  *
     19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     22  * BEAM LTD, TUNGSTEN GRAPHICS  AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     23  * DAMAGES OR
     24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
     26  * DEALINGS IN THE SOFTWARE.
     27  *
     28  * Authors:
     29  *    Terry Barnaby <terry1 (at) beam.ltd.uk>
     30  *    Keith Whitwell <keith (at) tungstengraphics.com>
     31  *    Thomas Hellstrom <unichrome (at) shipmail.org>
     32  *
     33  * This code provides standard DRM access to the Via Unichrome / Pro Vertical blank
     34  * interrupt, as well as an infrastructure to handle other interrupts of the chip.
     35  * The refresh rate is also calculated for video playback sync purposes.
     36  */
     37 
     38 #include <drm/drmP.h>
     39 #include <drm/via_drm.h>
     40 #include "via_drv.h"
     41 
#define VIA_REG_INTERRUPT       0x200

/*
 * VIA_REG_INTERRUPT bit assignments: one global master-enable bit,
 * plus a per-source enable bit and a per-source pending (status) bit.
 *
 * Note: bit 31 is expressed as (1U << 31); left-shifting a signed 1
 * into the sign bit is undefined behaviour in C.
 */
#define VIA_IRQ_GLOBAL	  (1U << 31)
#define VIA_IRQ_VBLANK_ENABLE   (1U << 19)
#define VIA_IRQ_VBLANK_PENDING  (1U << 3)
#define VIA_IRQ_HQV0_ENABLE     (1U << 11)
#define VIA_IRQ_HQV1_ENABLE     (1U << 25)
#define VIA_IRQ_HQV0_PENDING    (1U << 9)
#define VIA_IRQ_HQV1_PENDING    (1U << 10)
#define VIA_IRQ_DMA0_DD_ENABLE  (1U << 20)
#define VIA_IRQ_DMA0_TD_ENABLE  (1U << 21)
#define VIA_IRQ_DMA1_DD_ENABLE  (1U << 22)
#define VIA_IRQ_DMA1_TD_ENABLE  (1U << 23)
#define VIA_IRQ_DMA0_DD_PENDING (1U << 4)
#define VIA_IRQ_DMA0_TD_PENDING (1U << 5)
#define VIA_IRQ_DMA1_DD_PENDING (1U << 6)
#define VIA_IRQ_DMA1_TD_PENDING (1U << 7)
     60 
     61 
     62 /*
     63  * Device-specific IRQs go here. This type might need to be extended with
     64  * the register if there are multiple IRQ control registers.
     65  * Currently we activate the HQV interrupts of  Unichrome Pro group A.
     66  */
     67 
/*
 * IRQ mask table for Unichrome Pro group A (and DX9) chips.  Each row
 * is { enable bit, pending bit, status register, status mask, wait
 * value }: entries [0]/[1] feed cur_irq->enable_mask/pending_mask in
 * via_driver_irq_preinstall(), and [2]/[3]/[4] are used by
 * via_driver_irq_wait() to poll (VIA_READ(reg) & mask) == value.
 * Rows: HQV0, HQV1, DMA0 transfer-done, DMA1 transfer-done.
 */
static maskarray_t via_pro_group_a_irqs[] = {
	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
	 0x00000000 },
	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
	 0x00000000 },
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
/* Map from logical drm_via_irq_* index to a row in the table above;
 * -1 means that logical IRQ is not available on this chip. */
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
     80 
/*
 * IRQ mask table for older Unichrome chips (same row layout as the
 * Pro group A table: { enable, pending, status reg, mask, value }).
 * Only the two DMA transfer-done interrupts are exposed.
 */
static maskarray_t via_unichrome_irqs[] = {
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
};
static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
/* Logical drm_via_irq_* index -> table row; -1 = not available. */
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
     89 
     90 
/*
 * Microsecond difference between two timestamps taken less than one
 * second apart.  Only the tv_usec fields are consulted; if the
 * microsecond counter has wrapped between the two samples, the delta
 * is reconstructed modulo one second.
 */
static unsigned time_diff(struct timeval *now, struct timeval *then)
{
	long a = now->tv_usec;
	long b = then->tv_usec;

	if (a >= b)
		return a - b;
	return 1000000 - (b - a);
}
     97 
     98 u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
     99 {
    100 	drm_via_private_t *dev_priv = dev->dev_private;
    101 	if (crtc != 0)
    102 		return 0;
    103 
    104 	return atomic_read(&dev_priv->vbl_received);
    105 }
    106 
/*
 * Main interrupt dispatcher, registered as the DRM driver's IRQ
 * handler.  Reads the interrupt status register once, services the
 * vertical-blank interrupt and every chip-specific IRQ whose pending
 * bit is set, then writes the status value back to acknowledge.
 */
irqreturn_t via_driver_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	int handled = 0;
	struct timeval cur_vblank;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int i;

	status = VIA_READ(VIA_REG_INTERRUPT);
	if (status & VIA_IRQ_VBLANK_PENDING) {
		atomic_inc(&dev_priv->vbl_received);
		/* Every 16th vblank: sample the clock so the refresh
		 * period can be estimated for playback sync. */
		if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
			do_gettimeofday(&cur_vblank);
			if (dev_priv->last_vblank_valid) {
				/* 16 vblanks elapsed, hence the >> 4. */
				dev_priv->usec_per_vblank =
					time_diff(&cur_vblank,
						  &dev_priv->last_vblank) >> 4;
			}
			dev_priv->last_vblank = cur_vblank;
			dev_priv->last_vblank_valid = 1;
		}
		/* Log the estimate once every 256 vblanks. */
		if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
			DRM_DEBUG("US per vblank is: %u\n",
				  dev_priv->usec_per_vblank);
		}
		drm_handle_vblank(dev, 0);
		handled = 1;
	}

	/* Chip-specific IRQs: bump the per-IRQ sequence counter and wake
	 * any waiter sleeping in via_driver_irq_wait(). */
	for (i = 0; i < dev_priv->num_irqs; ++i) {
		if (status & cur_irq->pending_mask) {
#ifdef __NetBSD__
			spin_lock(&cur_irq->irq_lock);
			cur_irq->irq_received++;
			DRM_SPIN_WAKEUP_ONE(&cur_irq->irq_queue,
			    &cur_irq->irq_lock);
			spin_unlock(&cur_irq->irq_lock);
#else
			atomic_inc(&cur_irq->irq_received);
			wake_up(&cur_irq->irq_queue);
#endif
			handled = 1;
			/* DMA transfer-done IRQs also drive the blit
			 * engine's completion handling. */
			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
				via_dmablit_handler(dev, 0, 1);
			else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
				via_dmablit_handler(dev, 1, 1);
		}
		cur_irq++;
	}

	/* Acknowledge interrupts */
	VIA_WRITE(VIA_REG_INTERRUPT, status);


	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
    168 
    169 static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
    170 {
    171 	u32 status;
    172 
    173 	if (dev_priv) {
    174 		/* Acknowledge interrupts */
    175 		status = VIA_READ(VIA_REG_INTERRUPT);
    176 		VIA_WRITE(VIA_REG_INTERRUPT, status |
    177 			  dev_priv->irq_pending_mask);
    178 	}
    179 }
    180 
    181 int via_enable_vblank(struct drm_device *dev, int crtc)
    182 {
    183 	drm_via_private_t *dev_priv = dev->dev_private;
    184 	u32 status;
    185 
    186 	if (crtc != 0) {
    187 		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
    188 		return -EINVAL;
    189 	}
    190 
    191 	status = VIA_READ(VIA_REG_INTERRUPT);
    192 	VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
    193 
    194 	VIA_WRITE8(0x83d4, 0x11);
    195 	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
    196 
    197 	return 0;
    198 }
    199 
    200 void via_disable_vblank(struct drm_device *dev, int crtc)
    201 {
    202 	drm_via_private_t *dev_priv = dev->dev_private;
    203 	u32 status;
    204 
    205 	status = VIA_READ(VIA_REG_INTERRUPT);
    206 	VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
    207 
    208 	VIA_WRITE8(0x83d4, 0x11);
    209 	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
    210 
    211 	if (crtc != 0)
    212 		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
    213 }
    214 
    215 static int
    216 via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
    217 		    unsigned int *sequence)
    218 {
    219 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
    220 	unsigned int cur_irq_sequence;
    221 	drm_via_irq_t *cur_irq;
    222 	int ret = 0;
    223 	maskarray_t *masks;
    224 	int real_irq;
    225 
    226 	DRM_DEBUG("\n");
    227 
    228 	if (!dev_priv) {
    229 		DRM_ERROR("called with no initialization\n");
    230 		return -EINVAL;
    231 	}
    232 
    233 	if (irq >= drm_via_irq_num) {
    234 		DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
    235 		return -EINVAL;
    236 	}
    237 
    238 	real_irq = dev_priv->irq_map[irq];
    239 
    240 	if (real_irq < 0) {
    241 		DRM_ERROR("Video IRQ %d not available on this hardware.\n",
    242 			  irq);
    243 		return -EINVAL;
    244 	}
    245 
    246 	masks = dev_priv->irq_masks;
    247 	cur_irq = dev_priv->via_irqs + real_irq;
    248 
    249 #ifdef __NetBSD__
    250 	spin_lock(&cur_irq->irq_lock);
    251 	if (masks[real_irq][2] && !force_sequence) {
    252 		DRM_SPIN_WAIT_ON(ret, &cur_irq->irq_queue, &cur_irq->irq_lock,
    253 		    3 * DRM_HZ,
    254 		    ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
    255 			masks[irq][4]));
    256 		cur_irq_sequence = cur_irq->irq_received;
    257 	} else {
    258 		DRM_SPIN_WAIT_ON(ret, &cur_irq->irq_queue, &cur_irq->irq_lock,
    259 		    3 * DRM_HZ,
    260 		    (((cur_irq_sequence = cur_irq->irq_received) -
    261 			*sequence) <= (1 << 23)));
    262 	}
    263 	spin_unlock(&cur_irq->irq_lock);
    264 #else
    265 	if (masks[real_irq][2] && !force_sequence) {
    266 		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
    267 			    ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
    268 			     masks[irq][4]));
    269 		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
    270 	} else {
    271 		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
    272 			    (((cur_irq_sequence =
    273 			       atomic_read(&cur_irq->irq_received)) -
    274 			      *sequence) <= (1 << 23)));
    275 	}
    276 #endif
    277 	*sequence = cur_irq_sequence;
    278 	return ret;
    279 }
    280 
    281 
    282 /*
    283  * drm_dma.h hooks
    284  */
    285 
/*
 * Called before the IRQ handler is installed.  Selects the chip's IRQ
 * mask table, initializes the per-IRQ counters and wait queues,
 * accumulates the aggregate enable/pending masks, and masks + clears
 * all interrupts so nothing fires before postinstall enables them.
 */
void via_driver_irq_preinstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	drm_via_irq_t *cur_irq;
	int i;

	DRM_DEBUG("dev_priv: %p\n", dev_priv);
	if (dev_priv) {
		cur_irq = dev_priv->via_irqs;

		/* Vblank is always managed; chip-specific sources are
		 * OR-ed in from the mask table below. */
		dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
		dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;

		/* Pro group A / DX9 chips expose the HQV interrupts as
		 * well; older Unichromes only the DMA ones. */
		if (dev_priv->chipset == VIA_PRO_GROUP_A ||
		    dev_priv->chipset == VIA_DX9_0) {
			dev_priv->irq_masks = via_pro_group_a_irqs;
			dev_priv->num_irqs = via_num_pro_group_a;
			dev_priv->irq_map = via_irqmap_pro_group_a;
		} else {
			dev_priv->irq_masks = via_unichrome_irqs;
			dev_priv->num_irqs = via_num_unichrome;
			dev_priv->irq_map = via_irqmap_unichrome;
		}

		for (i = 0; i < dev_priv->num_irqs; ++i) {
#ifdef __NetBSD__
			/* NetBSD uses a plain counter guarded by a
			 * spin lock instead of an atomic_t. */
			spin_lock_init(&cur_irq->irq_lock);
			cur_irq->irq_received = 0;
#else
			atomic_set(&cur_irq->irq_received, 0);
#endif
			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
#ifdef __NetBSD__
			DRM_INIT_WAITQUEUE(&cur_irq->irq_queue, "viairq");
#else
			init_waitqueue_head(&cur_irq->irq_queue);
#endif
			dev_priv->irq_enable_mask |= cur_irq->enable_mask;
			dev_priv->irq_pending_mask |= cur_irq->pending_mask;
			cur_irq++;

			DRM_DEBUG("Initializing IRQ %d\n", i);
		}

		dev_priv->last_vblank_valid = 0;

		/* Clear VSync interrupt regs */
		status = VIA_READ(VIA_REG_INTERRUPT);
		VIA_WRITE(VIA_REG_INTERRUPT, status &
			  ~(dev_priv->irq_enable_mask));

		/* Clear bits if they're already high */
		viadrv_acknowledge_irqs(dev_priv);
	}
}
    343 
    344 int via_driver_irq_postinstall(struct drm_device *dev)
    345 {
    346 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
    347 	u32 status;
    348 
    349 	DRM_DEBUG("via_driver_irq_postinstall\n");
    350 	if (!dev_priv)
    351 		return -EINVAL;
    352 
    353 	status = VIA_READ(VIA_REG_INTERRUPT);
    354 	VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
    355 		  | dev_priv->irq_enable_mask);
    356 
    357 	/* Some magic, oh for some data sheets ! */
    358 	VIA_WRITE8(0x83d4, 0x11);
    359 	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
    360 
    361 	return 0;
    362 }
    363 
/*
 * Called when the IRQ handler is being removed.  Reverses postinstall
 * and preinstall: clears the CRT "magic" bits (presumably gating
 * vsync IRQ generation — undocumented), masks all interrupt sources
 * we enabled, and on NetBSD tears down the per-IRQ wait queues and
 * spin locks created in preinstall.
 */
void via_driver_irq_uninstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;

	DRM_DEBUG("\n");
	if (dev_priv) {

		/* Some more magic, oh for some data sheets ! */

		VIA_WRITE8(0x83d4, 0x11);
		VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);

		status = VIA_READ(VIA_REG_INTERRUPT);
		VIA_WRITE(VIA_REG_INTERRUPT, status &
			  ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));

#ifdef __NetBSD__
	    {
		int i;

		/* Destroy the resources preinstall created per IRQ. */
		for (i = 0; i < dev_priv->num_irqs; i++) {
			DRM_DESTROY_WAITQUEUE(&dev_priv->via_irqs[i].irq_queue);
			spin_lock_destroy(&dev_priv->via_irqs[i].irq_lock);
		}
	    }
#endif
	}
}
    393 
/*
 * Ioctl handler: wait for a chip-specific IRQ to reach a sequence
 * number.  Supports relative waits (target = current count + offset)
 * and absolute waits; on return the reply carries the sequence number
 * reached and a completion timestamp.  Signal-interruptible waits are
 * not implemented.
 */
int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_irqwait_t *irqwait = data;
	struct timeval now;
	int ret = 0;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int force_sequence;

	if (irqwait->request.irq >= dev_priv->num_irqs) {
		DRM_ERROR("Trying to wait on unknown irq %d\n",
			  irqwait->request.irq);
		return -EINVAL;
	}

	cur_irq += irqwait->request.irq;

	switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
	case VIA_IRQ_RELATIVE:
		/* Convert a relative wait into an absolute target by
		 * adding the IRQ's current sequence count. */
#ifdef __NetBSD__
		irqwait->request.sequence += cur_irq->irq_received;
#else
		irqwait->request.sequence +=
			atomic_read(&cur_irq->irq_received);
#endif
		irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
		/* FALLTHROUGH */
	case VIA_IRQ_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	if (irqwait->request.type & VIA_IRQ_SIGNAL) {
		DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
		return -EINVAL;
	}

	force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);

	ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
				  &irqwait->request.sequence);
	do_gettimeofday(&now);
	irqwait->reply.tval_sec = now.tv_sec;
	irqwait->reply.tval_usec = now.tv_usec;

	return ret;
}
    441