/*	$NetBSD: qxl_cmd.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $	*/

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

/* QXL cmd/ring handling */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qxl_cmd.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $");

#include <linux/delay.h>

#include <drm/drm_util.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);

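/*
 * Layout of a ring as it appears in device memory: a qxl_ring_header
 * holding the producer/consumer indices and notify thresholds,
 * followed immediately by the fixed-size element slots.  This struct
 * is only ever overlaid on memory supplied by the device; it is never
 * allocated by the driver itself.
 */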
struct ring {
	struct qxl_ring_header      header;
	uint8_t                     elements[];
};

struct qxl_ring {
	struct ring	       *ring;
	int			element_size;
	int			n_elements;
	int			prod_notify;
	wait_queue_head_t      *push_event;
	spinlock_t             lock;
};

void qxl_ring_free(struct qxl_ring *ring)
{
	kfree(ring);
}

void qxl_ring_init_hdr(struct qxl_ring *ring)
{
	ring->ring->header.notify_on_prod = ring->n_elements;
}

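/*
 * Wrap a device-resident ring header in driver-side bookkeeping.  Only
 * the struct qxl_ring wrapper is allocated here (and freed by
 * qxl_ring_free()); the header and element storage already live in the
 * device's ram header.  n_elements must be a power of two, since push
 * and pop mask indices with (n_elements - 1).  A caller would do
 * something like (a sketch; see the ring setup in qxl_kms.c):
 *
 *	ring = qxl_ring_create(&qdev->ram_header->cmd_ring_hdr,
 *			       sizeof(struct qxl_command),
 *			       QXL_COMMAND_RING_SIZE,
 *			       qdev->io_base + QXL_IO_NOTIFY_CMD,
 *			       false, &qdev->display_event);
 */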
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
		int element_size,
		int n_elements,
		int prod_notify,
		bool set_prod_notify,
		wait_queue_head_t *push_event)
{
	struct qxl_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ring = (struct ring *)header;
	ring->element_size = element_size;
	ring->n_elements = n_elements;
	ring->prod_notify = prod_notify;
	ring->push_event = push_event;
	if (set_prod_notify)
		qxl_ring_init_hdr(ring);
	spin_lock_init(&ring->lock);
	return ring;
}

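/*
 * Return nonzero if the ring has room for another element.  When it is
 * full, arm notify_on_cons so the device raises an interrupt once it
 * consumes the next entry, which wakes any producer sleeping in
 * qxl_ring_push() below.
 */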
static int qxl_check_header(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod - header->cons < header->num_items;
	if (ret == 0)
		header->notify_on_cons = header->cons + 1;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

int qxl_check_idle(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod == header->cons;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

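/*
 * Append one element to the ring.  If the ring is full, either
 * busy-wait (in atomic context, where we cannot sleep) or sleep on
 * push_event until the device has consumed an entry; with
 * interruptible set, a signal aborts the wait and the error is
 * returned.  After the element is copied in and prod is advanced,
 * kick the device through its notify port if it asked to be told
 * about this producer position.
 */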
int qxl_ring_push(struct qxl_ring *ring,
		  const void *new_elt, bool interruptible)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	uint8_t *elt;
	int idx, ret;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->prod - header->cons == header->num_items) {
		header->notify_on_cons = header->cons + 1;
		mb();
		spin_unlock_irqrestore(&ring->lock, flags);
		if (!drm_can_sleep()) {
			while (!qxl_check_header(ring))
				udelay(1);
		} else {
			if (interruptible) {
				ret = wait_event_interruptible(*ring->push_event,
							       qxl_check_header(ring));
				if (ret)
					return ret;
			} else {
				wait_event(*ring->push_event,
					   qxl_check_header(ring));
			}
		}
		spin_lock_irqsave(&ring->lock, flags);
	}

	idx = header->prod & (ring->n_elements - 1);
	elt = ring->ring->elements + idx * ring->element_size;

	memcpy((void *)elt, new_elt, ring->element_size);

	header->prod++;

	mb();

	if (header->prod == header->notify_on_prod)
		outb(0, ring->prod_notify);

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}

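/*
 * Take one element off the ring, or return false if it is empty.  On
 * empty we arm notify_on_prod so the device interrupts us when it next
 * produces an entry; the release ring is drained this way by
 * qxl_garbage_collect() below.
 */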
static bool qxl_ring_pop(struct qxl_ring *ring,
			 void *element)
{
	volatile struct qxl_ring_header *header = &(ring->ring->header);
	volatile uint8_t *ring_elt;
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->cons == header->prod) {
		header->notify_on_prod = header->cons + 1;
		spin_unlock_irqrestore(&ring->lock, flags);
		return false;
	}

	idx = header->cons & (ring->n_elements - 1);
	ring_elt = ring->ring->elements + idx * ring->element_size;

	memcpy(element, (void *)ring_elt, ring->element_size);

	header->cons++;

	spin_unlock_irqrestore(&ring->lock, flags);
	return true;
}

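/*
 * The two helpers below package a release into a qxl_command: the
 * given type plus, as the payload, the device-physical address of the
 * release bo, pushed onto the command or cursor ring respectively.
 */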
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			      uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}

int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			     uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}

bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
	if (!qxl_check_idle(qdev->release_ring)) {
		schedule_work(&qdev->gc_work);
		if (flush)
			flush_work(&qdev->gc_work);
		return true;
	}
	return false;
}

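/*
 * Drain the release ring.  Each entry popped is the head of a chain of
 * release ids linked through info->next; walk the chain, sanity-check
 * the release type, and free each release.  Returns the number of
 * releases freed.
 */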
int qxl_garbage_collect(struct qxl_device *qdev)
{
	struct qxl_release *release;
	uint64_t id, next_id;
	int i = 0;
	union qxl_release_info *info;

	while (qxl_ring_pop(qdev->release_ring, &id)) {
		DRM_DEBUG_DRIVER("popped %lld\n", id);
		while (id) {
			release = qxl_release_from_id_locked(qdev, id);
			if (release == NULL)
				break;

			info = qxl_release_map(qdev, release);
			next_id = info->next;
			qxl_release_unmap(qdev, release, info);

			DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
					 next_id);

			switch (release->type) {
			case QXL_RELEASE_DRAWABLE:
			case QXL_RELEASE_SURFACE_CMD:
			case QXL_RELEASE_CURSOR_CMD:
				break;
			default:
				DRM_ERROR("unexpected release type\n");
				break;
			}
			id = next_id;

			qxl_release_free(qdev, release);
			++i;
		}
	}

	DRM_DEBUG_DRIVER("%d\n", i);

	return i;
}

int qxl_alloc_bo_reserved(struct qxl_device *qdev,
			  struct qxl_release *release,
			  unsigned long size,
			  struct qxl_bo **_bo)
{
	struct qxl_bo *bo;
	int ret;

	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
			    false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
	if (ret) {
		DRM_ERROR("failed to allocate VRAM BO\n");
		return ret;
	}
	ret = qxl_release_list_add(release, bo);
	if (ret)
		goto out_unref;

	*_bo = bo;
	return 0;
out_unref:
	qxl_bo_unref(&bo);
	return ret;
}

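/*
 * Issue an asynchronous io-port command and wait (up to 5s) for its
 * completion interrupt.  If an earlier async command is still
 * outstanding (last_sent_io_cmd is ahead of irq_received_io_cmd),
 * first wait for that one to complete.  A timed-out wait is taken to
 * mean the "hw" has gone away and is not treated as an error; a
 * signal in the interruptible case yields -ERESTARTSYS.
 */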
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
	int irq_num;
	long addr = qdev->io_base + port;
	int ret;

	mutex_lock(&qdev->async_io_mutex);
	irq_num = atomic_read(&qdev->irq_received_io_cmd);
	if (qdev->last_sent_io_cmd > irq_num) {
		if (intr)
			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		else
			ret = wait_event_timeout(qdev->io_cmd_event,
						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		/* 0 means timeout; just bail, the "hw" has gone away */
		if (ret <= 0)
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}
	outb(val, addr);
	qdev->last_sent_io_cmd = irq_num + 1;
	if (intr)
		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
	else
		ret = wait_event_timeout(qdev->io_cmd_event,
					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
	if (ret > 0)
		ret = 0;
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}

static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
	int ret;

restart:
	ret = wait_for_io_cmd_user(qdev, val, port, false);
	if (ret == -ERESTARTSYS)
		goto restart;
}

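/*
 * Ask the device to render the given rectangle of a surface into its
 * backing memory.  The rectangle is validated against the surface
 * dimensions, written into the ram header alongside the surface id
 * (0 denotes the primary surface), and the async update-area port
 * command is issued.
 */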
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
			const struct qxl_rect *area)
{
	int surface_id;
	uint32_t surface_width, surface_height;
	int ret;

	if (!surf->hw_surf_alloc)
		DRM_ERROR("got io update area with no hw surface\n");

	if (surf->is_primary)
		surface_id = 0;
	else
		surface_id = surf->surface_id;
	surface_width = surf->surf.width;
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height)
		return -EINVAL;

	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
	mutex_unlock(&qdev->update_area_mutex);
	return ret;
}

void qxl_io_notify_oom(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}

void qxl_io_flush_release(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}

void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}

void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
	qdev->primary_bo->is_primary = false;
	drm_gem_object_put_unlocked(&qdev->primary_bo->tbo.base);
	qdev->primary_bo = NULL;
}

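/*
 * Make bo the primary (scanout) surface.  The surface parameters are
 * written into the create_surface slot of the ram header and the async
 * create-primary port command is issued; a GEM reference is taken on
 * the bo and dropped again by qxl_io_destroy_primary() above.
 */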
void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
{
	struct qxl_surface_create *create;

	if (WARN_ON(qdev->primary_bo))
		return;

	DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
	create = &qdev->ram_header->create_surface;
	create->format = bo->surf.format;
	create->width = bo->surf.width;
	create->height = bo->surf.height;
	create->stride = bo->surf.stride;
	create->mem = qxl_bo_physical_address(qdev, bo, 0);

	DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);

	create->flags = QXL_SURF_FLAG_KEEP_DATA;
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
	qdev->primary_bo = bo;
	qdev->primary_bo->is_primary = true;
	drm_gem_object_get(&qdev->primary_bo->tbo.base);
}

void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
	DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}

void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
}

void qxl_io_monitors_config(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}

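/*
 * Allocate a surface id for surf from the idr.  Ids must stay below
 * the device's n_surfaces limit; if we run past it, back the id out,
 * reap a couple of surfaces to free up ids, and retry.  The idr entry
 * is installed as NULL here and only pointed at the bo once the hw
 * surface actually exists (see qxl_hw_surface_alloc() below).
 */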
int qxl_surface_id_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	uint32_t handle;
	int idr_ret;
	int count = 0;
again:
	idr_preload(GFP_ATOMIC);
	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->surf_id_idr_lock);
	idr_preload_end();
	if (idr_ret < 0)
		return idr_ret;
	handle = idr_ret;

	if (handle >= qdev->rom->n_surfaces) {
		count++;
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

void qxl_surface_id_dealloc(struct qxl_device *qdev,
			    uint32_t surface_id)
{
	spin_lock(&qdev->surf_id_idr_lock);
	idr_remove(&qdev->surf_id_idr, surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
}

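/*
 * Create the surface on the device side.  A QXL_SURFACE_CMD_CREATE is
 * built in a fresh release and pushed onto the command ring; once that
 * is done the idr slot for the surface id is pointed at the bo so the
 * reaper can find it.
 */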
int qxl_hw_surface_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;

	if (surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
						 NULL,
						 &release);
	if (ret)
		return ret;

	ret = qxl_release_reserve_list(release, true);
	if (ret)
		return ret;

	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_CREATE;
	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
	cmd->u.surface_create.format = surf->surf.format;
	cmd->u.surface_create.width = surf->surf.width;
	cmd->u.surface_create.height = surf->surf.height;
	cmd->u.surface_create.stride = surf->surf.stride;
	cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
	cmd->surface_id = surf->surface_id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	surf->surf_create = release;

	/*
	 * No need to add a release to the fence for this surface bo,
	 * since it is only released when we ask to destroy the surface
	 * and it would never signal otherwise.
	 */
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
	qxl_release_fence_buffer_objects(release);

	surf->hw_surf_alloc = true;
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create,
						 &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	qxl_release_fence_buffer_objects(release);

	return 0;
}

static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
	struct qxl_rect rect;
	int ret;

	/*
	 * If we are evicting, we need to make sure the surface is up
	 * to date.
	 */
	rect.left = 0;
	rect.right = surf->surf.width;
	rect.top = 0;
	rect.bottom = surf->surf.height;
retry:
	ret = qxl_io_update_area(qdev, surf, &rect);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ret;
}

static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	/* no need to update area if we are just freeing the surface normally */
	if (do_update_area)
		qxl_update_surface(qdev, surf);

	/* nuke the surface id at the hw */
	qxl_hw_surface_dealloc(qdev, surf);
}

void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	mutex_lock(&qdev->surf_evict_mutex);
	qxl_surface_evict_locked(qdev, surf, do_update_area);
	mutex_unlock(&qdev->surf_evict_mutex);
}

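/*
 * Evict one surface on behalf of the id reaper.  The bo is reserved
 * and then waited on; when stalling is permitted, surf_evict_mutex is
 * dropped around the (blocking) ttm_bo_wait so other evictions can
 * proceed in the meantime.
 */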
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf, false);
	if (ret)
		return ret;

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	ret = ttm_bo_wait(&surf->tbo, true, !stall);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret) {
		qxl_bo_unreserve(surf);
		return ret;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}

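/*
 * Called when we have run out of surface ids: scan the idr, starting
 * just past the most recently allocated id, and evict up to
 * max_to_reap live surfaces.  A first non-blocking pass is retried as
 * a blocking one if nothing could be reaped; afterwards give the
 * device a moment and run garbage collection to recycle the ids.
 */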
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
	int num_reaped = 0;
	int i, ret;
	bool stall = false;
	int start = 0;

	mutex_lock(&qdev->surf_evict_mutex);
again:

	spin_lock(&qdev->surf_id_idr_lock);
	start = qdev->last_alloced_surf_id + 1;
	spin_unlock(&qdev->surf_id_idr_lock);

	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
		void *objptr;
		int surfid = i % qdev->rom->n_surfaces;

		/*
		 * This avoids the case where the object is in the idr
		 * but has been evicted halfway - it makes the idr
		 * lookup atomic with the eviction.
		 */
		spin_lock(&qdev->surf_id_idr_lock);
		objptr = idr_find(&qdev->surf_id_idr, surfid);
		spin_unlock(&qdev->surf_id_idr_lock);

		if (!objptr)
			continue;

		ret = qxl_reap_surf(qdev, objptr, stall);
		if (ret == 0)
			num_reaped++;
		if (num_reaped >= max_to_reap)
			break;
	}
	if (num_reaped == 0 && stall == false) {
		stall = true;
		goto again;
	}

	mutex_unlock(&qdev->surf_evict_mutex);
	if (num_reaped) {
		usleep_range(500, 1000);
		qxl_queue_garbage_collect(qdev, true);
	}

	return 0;
}