/*	$NetBSD: vmwgfx_irq.c,v 1.1.1.2.28.1 2018/09/06 06:56:34 pgoyette Exp $	*/

/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_irq.c,v 1.1.1.2.28.1 2018/09/06 06:56:34 pgoyette Exp $");

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

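/*
 * Fence seqnos are 32-bit and wrap around. Two seqnos are ordered by
 * checking whether their unsigned difference is below this threshold;
 * see vmw_seqno_passed() below.
 */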
#define VMW_FENCE_WRAP (1 << 24)

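/**
 * vmw_irq_handler - Top-half interrupt handler for the SVGA device.
 *
 * @irq: The irq number.
 * @arg: The drm device the handler was registered with.
 *
 * Reads and acknowledges the device irq status register, then wakes up
 * or schedules the waiters corresponding to the status bits that were
 * both pending and enabled in the irq mask.
 */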
irqreturn_t vmw_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	masked_status = status & READ_ONCE(dev_priv->irq_mask);

	if (likely(status))
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

	if (!status)
		return IRQ_NONE;

	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			     SVGA_IRQFLAG_FENCE_GOAL)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
	}

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
			     SVGA_IRQFLAG_ERROR))
		vmw_cmdbuf_tasklet_schedule(dev_priv->cman);

	return IRQ_HANDLED;
}

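/**
 * vmw_fifo_idle - Check whether the device has gone idle.
 *
 * @dev_priv: Pointer to the device private structure.
 * @seqno: Unused; present so that the function matches the
 * wait-condition signature used by vmw_fallback_wait().
 */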
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}

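/**
 * vmw_update_seqno - Refresh the cached last-read seqno from FIFO memory.
 *
 * @dev_priv: Pointer to the device private structure.
 * @fifo_state: FIFO state whose marker queue is pulled up to the newly
 * read seqno.
 *
 * If the seqno has advanced, also updates the fence manager so that
 * newly signaled fences are processed.
 */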
void vmw_update_seqno(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo_state)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	if (dev_priv->last_read_seqno != seqno) {
		dev_priv->last_read_seqno = seqno;
		vmw_marker_pull(&fifo_state->marker_queue, seqno);
		vmw_fences_update(dev_priv->fman);
	}
}

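/**
 * vmw_seqno_passed - Check whether a fence seqno has signaled.
 *
 * @dev_priv: Pointer to the device private structure.
 * @seqno: The seqno to check.
 *
 * First checks against the cached last-read seqno, then re-reads the
 * seqno from FIFO memory, and finally falls back to idle and staleness
 * checks for devices without fence support. All comparisons are done
 * modulo 2^32 using the VMW_FENCE_WRAP threshold.
 */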
bool vmw_seqno_passed(struct vmw_private *dev_priv,
			 uint32_t seqno)
{
	struct vmw_fifo_state *fifo_state;
	bool ret;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	fifo_state = &dev_priv->fifo;
	vmw_update_seqno(dev_priv, fifo_state);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, seqno))
		return true;

	/*
	 * Finally, check whether the seqno is ahead of what we've
	 * actually emitted; if so, the fence is stale and has signaled.
	 */

	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}

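/**
 * vmw_fallback_wait - Wait for a seqno by polling, without relying on
 * device interrupts.
 *
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Sleep between polls rather than busy-waiting.
 * @fifo_idle: Wait for the whole FIFO to go idle rather than for a
 * specific seqno, blocking command submission meanwhile.
 * @seqno: The seqno to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies before the device is declared hung.
 *
 * Returns 0 on success or device lockup (which is logged),
 * -ERESTARTSYS if an interruptible wait was interrupted, or the error
 * returned by vmw_cmdbuf_idle().
 */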
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/*
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle) {
		down_read(&fifo_state->rwsem);
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10*HZ);
			if (ret)
				goto out_err;
		}
	}

	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/*
			 * FIXME: Use schedule_hrtimeout() here for
			 * newer kernels and lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		u32 *fifo_mem = dev_priv->mmio_virt;

		vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
out_err:
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}

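/**
 * vmw_generic_waiter_add - Account for a new waiter and enable its irq.
 *
 * @dev_priv: Pointer to the device private structure.
 * @flag: The SVGA irq flag the waiter is interested in.
 * @waiter_count: Per-flag waiter count to increment.
 *
 * On the 0 -> 1 transition, clears any stale pending status for @flag
 * and unmasks it in the device irq mask register.
 */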
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
			    u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if ((*waiter_count)++ == 0) {
		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}

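/**
 * vmw_generic_waiter_remove - Account for a removed waiter.
 *
 * @dev_priv: Pointer to the device private structure.
 * @flag: The SVGA irq flag the waiter was interested in.
 * @waiter_count: Per-flag waiter count to decrement.
 *
 * On the 1 -> 0 transition, masks @flag in the device irq mask
 * register again.
 */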
void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if (--(*waiter_count) == 0) {
		dev_priv->irq_mask &= ~flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}

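/*
 * Thin wrappers around the generic waiter functions for the fence
 * (ANY_FENCE) and fence-goal irq flags.
 */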
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
			       &dev_priv->fence_queue_waiters);
}

void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
				  &dev_priv->fence_queue_waiters);
}

void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
			       &dev_priv->goal_queue_waiters);
}

void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
				  &dev_priv->goal_queue_waiters);
}

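/**
 * vmw_wait_seqno - Wait for a fence seqno to signal.
 *
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Passed on to vmw_fallback_wait() if polling is needed.
 * @seqno: The seqno to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Uses an interrupt-driven wait when the device supports fence irqs,
 * and falls back to polling otherwise. Returns 0 if the seqno has
 * passed, -EBUSY on timeout, or -ERESTARTSYS if interrupted.
 */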
int vmw_wait_seqno(struct vmw_private *dev_priv,
		      bool lazy, uint32_t seqno,
		      bool interruptible, unsigned long timeout)
{
	long ret;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return 0;

	if (likely(vmw_seqno_passed(dev_priv, seqno)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
					 interruptible, timeout);

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
					 interruptible, timeout);

	vmw_seqno_waiter_add(dev_priv);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fence_queue,
		     vmw_seqno_passed(dev_priv, seqno),
		     timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fence_queue,
		     vmw_seqno_passed(dev_priv, seqno),
		     timeout);

	vmw_seqno_waiter_remove(dev_priv);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	return ret;
}

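/**
 * vmw_irq_preinstall - Prepare for irq installation.
 *
 * @dev: Pointer to the drm device.
 *
 * Acknowledges any irq status bits that are already pending so that no
 * stale interrupts are delivered once the handler is installed.
 */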
void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

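/*
 * No post-install work is needed; irqs are unmasked on demand by the
 * waiter functions above.
 */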
int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}

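/**
 * vmw_irq_uninstall - Disable and acknowledge all device irqs.
 *
 * @dev: Pointer to the drm device.
 *
 * Masks all irqs and clears any pending status before the handler is
 * removed.
 */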
void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}