1 1.6 riastrad /* $NetBSD: vmwgfx_irq.c,v 1.6 2022/10/25 23:36:21 riastradh Exp $ */ 2 1.2 riastrad 3 1.3 riastrad // SPDX-License-Identifier: GPL-2.0 OR MIT 4 1.1 riastrad /************************************************************************** 5 1.1 riastrad * 6 1.3 riastrad * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA 7 1.1 riastrad * 8 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 9 1.1 riastrad * copy of this software and associated documentation files (the 10 1.1 riastrad * "Software"), to deal in the Software without restriction, including 11 1.1 riastrad * without limitation the rights to use, copy, modify, merge, publish, 12 1.1 riastrad * distribute, sub license, and/or sell copies of the Software, and to 13 1.1 riastrad * permit persons to whom the Software is furnished to do so, subject to 14 1.1 riastrad * the following conditions: 15 1.1 riastrad * 16 1.1 riastrad * The above copyright notice and this permission notice (including the 17 1.1 riastrad * next paragraph) shall be included in all copies or substantial portions 18 1.1 riastrad * of the Software. 19 1.1 riastrad * 20 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 23 1.1 riastrad * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 24 1.1 riastrad * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 25 1.1 riastrad * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 26 1.1 riastrad * USE OR OTHER DEALINGS IN THE SOFTWARE. 
27 1.1 riastrad * 28 1.1 riastrad **************************************************************************/ 29 1.1 riastrad 30 1.2 riastrad #include <sys/cdefs.h> 31 1.6 riastrad __KERNEL_RCSID(0, "$NetBSD: vmwgfx_irq.c,v 1.6 2022/10/25 23:36:21 riastradh Exp $"); 32 1.2 riastrad 33 1.3 riastrad #include <linux/sched/signal.h> 34 1.3 riastrad 35 1.5 riastrad #include <drm/drm_irq.h> 36 1.5 riastrad 37 1.1 riastrad #include "vmwgfx_drv.h" 38 1.1 riastrad 39 1.1 riastrad #define VMW_FENCE_WRAP (1 << 24) 40 1.1 riastrad 41 1.3 riastrad /** 42 1.3 riastrad * vmw_thread_fn - Deferred (process context) irq handler 43 1.3 riastrad * 44 1.3 riastrad * @irq: irq number 45 1.3 riastrad * @arg: Closure argument. Pointer to a struct drm_device cast to void * 46 1.3 riastrad * 47 1.3 riastrad * This function implements the deferred part of irq processing. 48 1.3 riastrad * The function is guaranteed to run at least once after the 49 1.3 riastrad * vmw_irq_handler has returned with IRQ_WAKE_THREAD. 
 *
 */
#ifdef __NetBSD__
/*
 * On NetBSD the deferred half runs as a workqueue(9) work item, so the
 * signature is (work, arg) rather than Linux's threaded-irq (irq, arg).
 */
static void
vmw_thread_fn(struct work *work, void *arg)
#else
static irqreturn_t vmw_thread_fn(int irq, void *arg)
#endif
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	irqreturn_t ret = IRQ_NONE;	/* computed but unreturned on NetBSD */

#ifdef __NetBSD__
	/*
	 * Clear the "work already scheduled" flag before processing the
	 * pending bits, so an interrupt that fires while we run will
	 * re-enqueue the work instead of being coalesced away.
	 */
	atomic_store_relaxed(&dev_priv->irqthread_scheduled, false);
#endif

	/* Fence interrupt pending: update fences and wake seqno waiters. */
	if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
			       dev_priv->irqthread_pending)) {
		spin_lock(&dev_priv->fence_lock);
		vmw_fences_update(dev_priv->fman);
		DRM_SPIN_WAKEUP_ALL(&dev_priv->fence_queue,
		    &dev_priv->fence_lock);
		spin_unlock(&dev_priv->fence_lock);
		ret = IRQ_HANDLED;
	}

	/* Command-buffer interrupt pending: hand off to the cmdbuf manager. */
	if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
			       dev_priv->irqthread_pending)) {
		vmw_cmdbuf_irqthread(dev_priv->cman);
		ret = IRQ_HANDLED;
	}

#ifndef __NetBSD__
	return ret;
#endif
}

/**
 * vmw_irq_handler irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the quick part of irq processing.
 * The function performs fast actions like clearing the device interrupt
 * flags and also reasonably quick actions like waking processes waiting for
 * FIFO space. Other IRQ actions are deferred to the IRQ thread.
 */
static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;
	irqreturn_t ret = IRQ_HANDLED;

	/* Read the raw interrupt status from the device I/O port. */
#ifdef __NetBSD__
	status = bus_space_read_4(dev_priv->iot, dev_priv->ioh,
	    VMWGFX_IRQSTATUS_PORT);
#else
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
#endif
	/* Only act on the bits we currently have waiters for. */
	masked_status = status & READ_ONCE(dev_priv->irq_mask);

	/* Ack everything we saw, masked or not, by writing it back. */
	if (likely(status))
#ifdef __NetBSD__
		bus_space_write_4(dev_priv->iot, dev_priv->ioh,
		    VMWGFX_IRQSTATUS_PORT, status);
#else
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
#endif

	if (!status)
		return IRQ_NONE;

	/* FIFO progress is cheap to handle here: just wake FIFO waiters. */
	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS) {
		spin_lock(&dev_priv->fifo_lock);
		DRM_SPIN_WAKEUP_ALL(&dev_priv->fifo_queue,
		    &dev_priv->fifo_lock);
		spin_unlock(&dev_priv->fifo_lock);
	}

	/*
	 * Fence and command-buffer work is deferred to vmw_thread_fn.
	 * test_and_set_bit avoids requesting the thread twice for the
	 * same still-pending event.
	 */
	if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			      SVGA_IRQFLAG_FENCE_GOAL)) &&
	    !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

	if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
			      SVGA_IRQFLAG_ERROR)) &&
	    !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
			      dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

#ifdef __NetBSD__
	/*
	 * No threaded-irq support here: emulate IRQ_WAKE_THREAD by
	 * enqueueing the workqueue(9) job, at most once until
	 * vmw_thread_fn clears irqthread_scheduled again.
	 */
	if (ret == IRQ_WAKE_THREAD) {
		if (atomic_swap_uint(&dev_priv->irqthread_scheduled, 1) == 0) {
			workqueue_enqueue(dev_priv->irqthread_wq,
			    &dev_priv->irqthread_work, NULL);
		}
		ret = IRQ_HANDLED;
	}
#endif

	return ret;
}

/*
 * Return true iff the device reports itself idle.  The seqno argument is
 * unused; it exists so this matches the wait_condition function-pointer
 * signature used by vmw_fallback_wait.
 */
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{

	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}

/*
 * Refresh last_read_seqno from the FIFO fence register and, if it moved,
 * retire markers and update fence state.  Caller holds fence_lock.
 */
void vmw_update_seqno(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo_state)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	assert_spin_locked(&dev_priv->fence_lock);

	if (dev_priv->last_read_seqno != seqno) {
		dev_priv->last_read_seqno = seqno;
		vmw_marker_pull(&fifo_state->marker_queue, seqno);
		vmw_fences_update(dev_priv->fman);
	}
}

/*
 * Return true iff the device has passed @seqno.  Comparisons are done
 * modulo VMW_FENCE_WRAP to tolerate 32-bit seqno wraparound.  Caller
 * holds fence_lock.
 */
bool vmw_seqno_passed(struct vmw_private *dev_priv,
			 uint32_t seqno)
{
	struct vmw_fifo_state *fifo_state;
	bool ret;

	assert_spin_locked(&dev_priv->fence_lock);

	/* Fast path: cached seqno already shows the fence has passed. */
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	/* Slow path: re-read the hardware seqno and retry. */
	fifo_state = &dev_priv->fifo;
	vmw_update_seqno(dev_priv, fifo_state);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	/* Without fence support, an idle device implies the fence passed. */
	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, seqno))
		return true;

	/**
	 * Then check if the seqno is higher than what we've actually
	 * emitted. Then the fence is stale and signaled.
	 */

	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}

/*
 * Poll/sleep-based wait used when the device lacks fence or IRQ support.
 *
 * @lazy: sleep between polls instead of busy-waiting.
 * @fifo_idle: wait for device idle (and block command submission) rather
 *	than for @seqno; also rewrites the FIFO fence register on success.
 * @interruptible: allow signals to interrupt the wait.
 * @timeout: wait budget in jiffies/ticks.
 *
 * Returns 0 on success/timeout-after-condition, -ERESTARTSYS on signal,
 * or an error from vmw_cmdbuf_idle.
 */
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
#ifndef __NetBSD__
	DEFINE_WAIT(__wait);
#endif

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/**
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle) {
		down_read(&fifo_state->rwsem);
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10*HZ);
			if (ret)
				goto out_err;
		}
	}

	/*
	 * NOTE(review): fence_lock is taken unconditionally here but only
	 * released on the __NetBSD__ paths below; the !__NetBSD__ branches
	 * appear to be vestigial Linux reference code — confirm before
	 * building them.
	 */
	spin_lock(&dev_priv->fence_lock);

	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
#ifdef __NetBSD__
		if (!lazy) {
			/* Busy-poll, yielding the CPU every 16 spins. */
			if (wait_condition(dev_priv, seqno))
				break;
			spin_unlock(&dev_priv->fence_lock);
			if ((++count & 0xf) == 0)
				yield();
			spin_lock(&dev_priv->fence_lock);
		} else if (interruptible) {
			/* Sleep one tick at a time on the fence queue. */
			DRM_SPIN_TIMED_WAIT_UNTIL(ret, &dev_priv->fence_queue,
			    &dev_priv->fence_lock, /*timeout*/1,
			    wait_condition(dev_priv, seqno));
		} else {
			DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret,
			    &dev_priv->fence_queue,
			    &dev_priv->fence_lock, /*timeout*/1,
			    wait_condition(dev_priv, seqno));
		}
		if (ret) {	/* success or error but not timeout */
			if (ret > 0)	/* success */
				ret = 0;
			break;
		}
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
#else
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
#endif
	}
#ifndef __NetBSD__
	finish_wait(&dev_priv->fence_queue, &__wait);
#endif
	/* On a successful idle-wait, publish the seqno we waited out. */
	if (ret == 0 && fifo_idle) {
		u32 *fifo_mem = dev_priv->mmio_virt;

		vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
#ifdef __NetBSD__
	DRM_SPIN_WAKEUP_ALL(&dev_priv->fence_queue, &dev_priv->fence_lock);
	spin_unlock(&dev_priv->fence_lock);
#else
	wake_up_all(&dev_priv->fence_queue);
#endif
out_err:
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}

/*
 * Reference-counted enabling of an interrupt source: the first waiter
 * acks any stale status for @flag and adds it to the device IRQ mask.
 */
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
			    u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if ((*waiter_count)++ == 0) {
#ifdef __NetBSD__
		bus_space_write_4(dev_priv->iot, dev_priv->ioh,
		    VMWGFX_IRQSTATUS_PORT, flag);
#else
		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
#endif
		dev_priv->irq_mask |= flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}

/*
 * Counterpart of vmw_generic_waiter_add: the last waiter removes @flag
 * from the device IRQ mask.
 */
void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if (--(*waiter_count) == 0) {
		dev_priv->irq_mask &= ~flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}

/* Enable the ANY_FENCE interrupt for a new seqno waiter. */
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
			       &dev_priv->fence_queue_waiters);
}

/* Drop a seqno waiter's reference on the ANY_FENCE interrupt. */
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
				  &dev_priv->fence_queue_waiters);
}

/* Enable the FENCE_GOAL interrupt for a new goal waiter. */
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
			       &dev_priv->goal_queue_waiters);
}

/* Drop a goal waiter's reference on the FENCE_GOAL interrupt. */
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
				  &dev_priv->goal_queue_waiters);
}

/*
 * Wait until the device passes @seqno, preferring an interrupt-driven
 * wait and falling back to vmw_fallback_wait when the device lacks
 * fence (SVGA_FIFO_CAP_FENCE) or IRQ (SVGA_CAP_IRQMASK) support.
 *
 * Returns 0 on success, -EBUSY on timeout, or a negative error.
 */
int vmw_wait_seqno(struct vmw_private *dev_priv,
		      bool lazy, uint32_t seqno,
		      bool interruptible, unsigned long timeout)
{
	long ret;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	spin_lock(&dev_priv->fence_lock);
	/* Cheap check against the cached seqno first... */
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) {
		spin_unlock(&dev_priv->fence_lock);
		return 0;
	}

	/* ...then the full check, which re-reads the hardware seqno. */
	if (likely(vmw_seqno_passed(dev_priv, seqno))) {
		spin_unlock(&dev_priv->fence_lock);
		return 0;
	}

	/* Kick the device so it keeps making progress while we wait. */
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE)) {
		spin_unlock(&dev_priv->fence_lock);
		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
					 interruptible, timeout);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) {
		spin_unlock(&dev_priv->fence_lock);
		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
					 interruptible, timeout);
	}

	/* Interrupt-driven wait: enable fence IRQs while we sleep. */
	vmw_seqno_waiter_add(dev_priv);

	if (interruptible)
		DRM_SPIN_TIMED_WAIT_UNTIL(ret, &dev_priv->fence_queue,
		    &dev_priv->fence_lock, timeout,
		    vmw_seqno_passed(dev_priv, seqno));
	else
		DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &dev_priv->fence_queue,
		    &dev_priv->fence_lock, timeout,
		    vmw_seqno_passed(dev_priv, seqno));

	vmw_seqno_waiter_remove(dev_priv);

	spin_unlock(&dev_priv->fence_lock);

	/* Map the DRM wait result: 0 = timeout, >0 = condition met. */
	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	return ret;
}

/* Ack any interrupt status left over from before we take the IRQ. */
static void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

#ifdef __NetBSD__
	status = bus_space_read_4(dev_priv->iot, dev_priv->ioh,
	    VMWGFX_IRQSTATUS_PORT);
	bus_space_write_4(dev_priv->iot, dev_priv->ioh, VMWGFX_IRQSTATUS_PORT,
	    status);
#else
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
#endif
}

/*
 * Tear down interrupt handling: mask all device interrupts, ack any
 * pending status, and release the irq handler (and, on NetBSD, the
 * deferred-work workqueue created by vmw_irq_install).
 */
void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	if (!dev->irq_enabled)
		return;

	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

#ifdef __NetBSD__
	status = bus_space_read_4(dev_priv->iot, dev_priv->ioh,
	    VMWGFX_IRQSTATUS_PORT);
	bus_space_write_4(dev_priv->iot, dev_priv->ioh, VMWGFX_IRQSTATUS_PORT,
	    status);
#else
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
#endif

	dev->irq_enabled = false;
#ifdef __NetBSD__
	int ret = drm_irq_uninstall(dev);
	KASSERT(ret == 0);
	workqueue_destroy(dev_priv->irqthread_wq);
#else
	free_irq(dev->irq, dev);
#endif
}

/**
 * vmw_irq_install - Install the irq handlers
 *
 * @dev: Pointer to the drm device.
 * @irq: The irq number.
 * Return: Zero if successful. Negative number otherwise.
 */
int vmw_irq_install(struct drm_device *dev, int irq)
{
	int ret;

	if (dev->irq_enabled)
		return -EBUSY;

	vmw_irq_preinstall(dev);

#ifdef __NetBSD__
	/* XXX errno NetBSD->Linux */
	/*
	 * Create the workqueue that stands in for the Linux irq thread,
	 * then hook up the hard irq handler via the DRM core.
	 */
	ret = -workqueue_create(&vmw_priv(dev)->irqthread_wq, "vmwgfirq",
	    vmw_thread_fn, dev, PRI_NONE, IPL_DRM, WQ_MPSAFE);
	if (ret < 0)
		return ret;
	ret = drm_irq_install(dev);
	if (ret < 0) {
		workqueue_destroy(vmw_priv(dev)->irqthread_wq);
		vmw_priv(dev)->irqthread_wq = NULL;
	}
#else
	ret = request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn,
				   IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
#endif
	if (ret < 0)
		return ret;

	dev->irq_enabled = true;
	dev->irq = irq;

	return ret;
}