/*	$NetBSD: qxl_cmd.c,v 1.1.1.1.30.1 2018/09/06 06:56:31 pgoyette Exp $	*/

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

/* QXL cmd/ring handling */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qxl_cmd.c,v 1.1.1.1.30.1 2018/09/06 06:56:31 pgoyette Exp $");

#include "qxl_drv.h"
#include "qxl_object.h"

static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);

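/*
 * Device-visible ring layout: a qxl_ring_header followed directly by the
 * element array, all living in the shared ram header.  struct qxl_ring is
 * the driver-side wrapper around it.
 */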
struct ring {
	struct qxl_ring_header      header;
	uint8_t                     elements[0];
};

struct qxl_ring {
	struct ring	*ring;
	int		element_size;
	int		n_elements;
	int		prod_notify;
	wait_queue_head_t *push_event;
	spinlock_t lock;
};

void qxl_ring_free(struct qxl_ring *ring)
{
	kfree(ring);
}

void qxl_ring_init_hdr(struct qxl_ring *ring)
{
	ring->ring->header.notify_on_prod = ring->n_elements;
}

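/*
 * Wrap a ring whose header already lives in device memory.  element_size
 * and n_elements describe the element array that follows the header,
 * prod_notify is the I/O port to kick when the device has asked to be
 * notified of production, and push_event is the wait queue the IRQ
 * handler wakes as the device consumes entries.
 *
 * A minimal usage sketch, mirroring how device init sets up the command
 * ring (exact arguments may differ):
 *
 *	qdev->command_ring =
 *	    qxl_ring_create(&qdev->ram_header->cmd_ring_hdr,
 *			    sizeof(struct qxl_command),
 *			    QXL_COMMAND_RING_SIZE,
 *			    qdev->io_base + QXL_IO_NOTIFY_CMD,
 *			    false, &qdev->display_event);
 */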
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
		int element_size,
		int n_elements,
		int prod_notify,
		bool set_prod_notify,
		wait_queue_head_t *push_event)
{
	struct qxl_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ring = (struct ring *)header;
	ring->element_size = element_size;
	ring->n_elements = n_elements;
	ring->prod_notify = prod_notify;
	ring->push_event = push_event;
	if (set_prod_notify)
		qxl_ring_init_hdr(ring);
	spin_lock_init(&ring->lock);
	return ring;
}

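/*
 * Return nonzero when there is room in the ring.  When the ring is full,
 * arm notify_on_cons so the device raises an interrupt once it consumes
 * another entry.
 */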
static int qxl_check_header(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;
	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod - header->cons < header->num_items;
	if (ret == 0)
		header->notify_on_cons = header->cons + 1;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

int qxl_check_idle(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;
	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod == header->cons;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

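/*
 * Push one element onto the ring.  If the ring is full, arm consumer
 * notification and wait for space: busy-wait when sleeping is not
 * allowed, otherwise sleep on push_event (interruptibly if requested).
 * Kick the device through prod_notify once prod reaches notify_on_prod.
 * Returns 0 on success or -ERESTARTSYS if an interruptible wait was
 * broken by a signal.
 */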
int qxl_ring_push(struct qxl_ring *ring,
		  const void *new_elt, bool interruptible)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	uint8_t *elt;
	int idx, ret;
	unsigned long flags;
	spin_lock_irqsave(&ring->lock, flags);
	if (header->prod - header->cons == header->num_items) {
		header->notify_on_cons = header->cons + 1;
		mb();
		spin_unlock_irqrestore(&ring->lock, flags);
		if (!drm_can_sleep()) {
			while (!qxl_check_header(ring))
				udelay(1);
		} else {
			if (interruptible) {
				ret = wait_event_interruptible(*ring->push_event,
							       qxl_check_header(ring));
				if (ret)
					return ret;
			} else {
				wait_event(*ring->push_event,
					   qxl_check_header(ring));
			}
		}
		spin_lock_irqsave(&ring->lock, flags);
	}

	idx = header->prod & (ring->n_elements - 1);
	elt = ring->ring->elements + idx * ring->element_size;

	memcpy((void *)elt, new_elt, ring->element_size);

	header->prod++;

	mb();

	if (header->prod == header->notify_on_prod)
		outb(0, ring->prod_notify);

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}

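/*
 * Pop one element from the ring into *element.  If the ring is empty,
 * arm notify_on_prod so the device interrupts us on the next production
 * and return false; otherwise copy the entry out, advance cons and
 * return true.
 */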
static bool qxl_ring_pop(struct qxl_ring *ring,
			 void *element)
{
	volatile struct qxl_ring_header *header = &(ring->ring->header);
	volatile uint8_t *ring_elt;
	int idx;
	unsigned long flags;
	spin_lock_irqsave(&ring->lock, flags);
	if (header->cons == header->prod) {
		header->notify_on_prod = header->cons + 1;
		spin_unlock_irqrestore(&ring->lock, flags);
		return false;
	}

	idx = header->cons & (ring->n_elements - 1);
	ring_elt = ring->ring->elements + idx * ring->element_size;

	memcpy(element, (void *)ring_elt, ring->element_size);

	header->cons++;

	spin_unlock_irqrestore(&ring->lock, flags);
	return true;
}

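/*
 * Push a release onto the command (or, below, the cursor) ring as a
 * qxl_command whose data field is the device physical address of the
 * release's command buffer; that buffer lives in the first BO on the
 * release's list.
 */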
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			      uint32_t type, bool interruptible)
{
	struct qxl_command cmd;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);

	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}

int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			     uint32_t type, bool interruptible)
{
	struct qxl_command cmd;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);

	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}

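/*
 * Schedule release-ring garbage collection if the ring has entries to
 * reap; optionally wait for the work to finish.  Returns true when work
 * was queued.
 */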
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
	if (!qxl_check_idle(qdev->release_ring)) {
		queue_work(qdev->gc_queue, &qdev->gc_work);
		if (flush)
			flush_work(&qdev->gc_work);
		return true;
	}
	return false;
}

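/*
 * Drain the release ring.  Each popped id names a release, and releases
 * are chained through info->next in device memory, so walk the whole
 * chain and free every release on it.  Returns the number of releases
 * freed.
 */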
int qxl_garbage_collect(struct qxl_device *qdev)
{
	struct qxl_release *release;
	uint64_t id, next_id;
	int i = 0;
	union qxl_release_info *info;

	while (qxl_ring_pop(qdev->release_ring, &id)) {
		QXL_INFO(qdev, "popped %lld\n", id);
		while (id) {
			release = qxl_release_from_id_locked(qdev, id);
			if (release == NULL)
				break;

			info = qxl_release_map(qdev, release);
			next_id = info->next;
			qxl_release_unmap(qdev, release, info);

			QXL_INFO(qdev, "popped %lld, next %lld\n", id,
				 next_id);

			switch (release->type) {
			case QXL_RELEASE_DRAWABLE:
			case QXL_RELEASE_SURFACE_CMD:
			case QXL_RELEASE_CURSOR_CMD:
				break;
			default:
				DRM_ERROR("unexpected release type\n");
				break;
			}
			id = next_id;

			qxl_release_free(qdev, release);
			++i;
		}
	}

	QXL_INFO(qdev, "%s: %d\n", __func__, i);

	return i;
}

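/*
 * Allocate a VRAM BO of the given size and attach it to the release's
 * BO list, returning it through *_bo.  The BO is unreferenced again if
 * it cannot be added to the list.
 */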
int qxl_alloc_bo_reserved(struct qxl_device *qdev,
			  struct qxl_release *release,
			  unsigned long size,
			  struct qxl_bo **_bo)
{
	struct qxl_bo *bo;
	int ret;

	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
			    false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
	if (ret) {
		DRM_ERROR("failed to allocate VRAM BO\n");
		return ret;
	}
	ret = qxl_release_list_add(release, bo);
	if (ret)
		goto out_unref;

	*_bo = bo;
	return 0;
out_unref:
	qxl_bo_unref(&bo);
	return ret;
}

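/*
 * Issue an asynchronous I/O command and wait for the device to ack it.
 * Each async outb is acknowledged by an interrupt that bumps
 * irq_received_io_cmd; if a previous command is still in flight, wait
 * for its ack first.  Waits time out after five seconds, in which case
 * the hardware is assumed gone and no error is reported; an
 * interruptible wait may instead return -ERESTARTSYS.
 */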
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
	int irq_num;
	long addr = qdev->io_base + port;
	int ret;

	mutex_lock(&qdev->async_io_mutex);
	irq_num = atomic_read(&qdev->irq_received_io_cmd);
	if (qdev->last_sent_io_cmd > irq_num) {
		if (intr)
			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		else
			ret = wait_event_timeout(qdev->io_cmd_event,
						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		/* 0 is a timeout; just bail, the "hw" has gone away */
		if (ret <= 0)
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}
	outb(val, addr);
	qdev->last_sent_io_cmd = irq_num + 1;
	if (intr)
		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
	else
		ret = wait_event_timeout(qdev->io_cmd_event,
					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
	if (ret > 0)
		ret = 0;
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}

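/*
 * Uninterruptible wrapper around wait_for_io_cmd_user(); retried until
 * the wait is not restarted by a signal.
 */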
static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
	int ret;

restart:
	ret = wait_for_io_cmd_user(qdev, val, port, false);
	if (ret == -ERESTARTSYS)
		goto restart;
}

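/*
 * Ask the device to refresh the given area of a surface.  The rectangle
 * is validated against the surface dimensions, written into the shared
 * ram header under update_area_mutex, and then QXL_IO_UPDATE_AREA_ASYNC
 * is issued.
 */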
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
		       const struct qxl_rect *area)
{
	int surface_id;
	uint32_t surface_width, surface_height;
	int ret;

	if (!surf->hw_surf_alloc)
		DRM_ERROR("got io update area with no hw surface\n");

	if (surf->is_primary)
		surface_id = 0;
	else
		surface_id = surf->surface_id;
	surface_width = surf->surf.width;
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height) {
		qxl_io_log(qdev, "%s: not doing area update for "
			   "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
			   area->top, area->right, area->bottom, surface_width, surface_height);
		return -EINVAL;
	}
	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
	mutex_unlock(&qdev->update_area_mutex);
	return ret;
}

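/*
 * Plain I/O port kicks: the synchronous ones are fire-and-forget outb
 * writes, while the *_ASYNC ones go through wait_for_io_cmd() and block
 * until the device acknowledges.
 */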
void qxl_io_notify_oom(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}

void qxl_io_flush_release(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}

void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}

void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
}

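/*
 * Create the primary surface: describe the BO (format, size, stride and
 * device physical address at the given offset) in the ram header's
 * create_surface slot and issue QXL_IO_CREATE_PRIMARY_ASYNC.
 */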
void qxl_io_create_primary(struct qxl_device *qdev,
			   unsigned offset, struct qxl_bo *bo)
{
	struct qxl_surface_create *create;

	QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
		 qdev->ram_header);
	create = &qdev->ram_header->create_surface;
	create->format = bo->surf.format;
	create->width = bo->surf.width;
	create->height = bo->surf.height;
	create->stride = bo->surf.stride;
	create->mem = qxl_bo_physical_address(qdev, bo, offset);

	QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
		 bo->kptr);

	create->flags = QXL_SURF_FLAG_KEEP_DATA;
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
}

void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
	QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}

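/*
 * Write a formatted message into the shared log buffer and tell the
 * device to emit it.
 */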
void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
	va_end(args);
	/*
	 * Do not do DRM output here - this will call printk, which will
	 * call back into qxl for rendering (qxl_fb)
	 */
	outb(0, qdev->io_base + QXL_IO_LOG);
}

void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
}

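/*
 * Log the current monitors config and issue
 * QXL_IO_MONITORS_CONFIG_ASYNC so the device re-reads it.
 */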
void qxl_io_monitors_config(struct qxl_device *qdev)
{
	qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
		   qdev->monitors_config ?
		   qdev->monitors_config->count : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].width : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].height : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].x : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].y : -1
		   );

	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}

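/*
 * Allocate a surface id from the idr; the slot initially holds NULL and
 * is only pointed at the BO once the hardware surface exists.  If the
 * device's surface limit (rom->n_surfaces) is hit, give the id back,
 * reap a couple of surfaces and retry.
 */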
int qxl_surface_id_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	uint32_t handle;
	int idr_ret;
	int count = 0;
again:
	idr_preload(GFP_ATOMIC);
	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->surf_id_idr_lock);
	idr_preload_end();
	if (idr_ret < 0)
		return idr_ret;
	handle = idr_ret;

	if (handle >= qdev->rom->n_surfaces) {
		count++;
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

void qxl_surface_id_dealloc(struct qxl_device *qdev,
			    uint32_t surface_id)
{
	spin_lock(&qdev->surf_id_idr_lock);
	idr_remove(&qdev->surf_id_idr, surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
}

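/*
 * Create the hardware surface backing a BO: build a
 * QXL_SURFACE_CMD_CREATE release, point it at the surface data (either
 * the BO's pending placement in new_mem or its current physical
 * address), push it on the command ring, and publish the BO in the
 * surface idr.  The create release is kept in surf->surf_create until
 * the surface is destroyed, since the device never signals it on its
 * own.
 */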
int qxl_hw_surface_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf,
			 struct ttm_mem_reg *new_mem)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;

	if (surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
						 NULL,
						 &release);
	if (ret)
		return ret;

	ret = qxl_release_reserve_list(release, true);
	if (ret)
		return ret;

	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_CREATE;
	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
	cmd->u.surface_create.format = surf->surf.format;
	cmd->u.surface_create.width = surf->surf.width;
	cmd->u.surface_create.height = surf->surf.height;
	cmd->u.surface_create.stride = surf->surf.stride;
	if (new_mem) {
		int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
		struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);

		/* TODO - need to hold one of the locks to read tbo.offset */
		cmd->u.surface_create.data = slot->high_bits;

		cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
	} else
		cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
	cmd->surface_id = surf->surface_id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	surf->surf_create = release;

	/* no need to add a release to the fence for this surface bo,
	   since it is only released when we ask to destroy the surface
	   and it would never signal otherwise */
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
	qxl_release_fence_buffer_objects(release);

	surf->hw_surf_alloc = true;
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

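/*
 * Destroy the hardware surface: unpublish the BO from the idr, push a
 * QXL_SURFACE_CMD_DESTROY and stash the surface id in the release, so
 * the id is only recycled once the destroy command has been garbage
 * collected.
 */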
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create,
						 &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	qxl_release_fence_buffer_objects(release);

	return 0;
}

int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
	struct qxl_rect rect;
	int ret;

	/* if we are evicting, we need to make sure the surface is up
	   to date */
	rect.left = 0;
	rect.right = surf->surf.width;
	rect.top = 0;
	rect.bottom = surf->surf.height;
retry:
	ret = qxl_io_update_area(qdev, surf, &rect);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ret;
}

static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	/* no need to update area if we are just freeing the surface normally */
	if (do_update_area)
		qxl_update_surface(qdev, surf);

	/* nuke the surface id at the hw */
	qxl_hw_surface_dealloc(qdev, surf);
}

void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	mutex_lock(&qdev->surf_evict_mutex);
	qxl_surface_evict_locked(qdev, surf, do_update_area);
	mutex_unlock(&qdev->surf_evict_mutex);
}

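/*
 * Evict one surface for the reaper: reserve the BO, wait for it to go
 * idle (dropping surf_evict_mutex across a blocking wait when stalling
 * is allowed), then evict it with an update so its contents survive.
 */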
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf, false);
	if (ret)
		return ret;

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret) {
		qxl_bo_unreserve(surf);
		return ret;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}

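/*
 * Free up surface ids when the idr is full: sweep the idr starting just
 * past the most recently allocated id (so the oldest surfaces go
 * first), evicting up to max_to_reap hardware surfaces.  The first pass
 * is non-blocking; if nothing could be reaped, a second pass is allowed
 * to stall.  Finish with a synchronous garbage collection so the freed
 * ids become reusable.
 */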
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
	int num_reaped = 0;
	int i, ret;
	bool stall = false;
	int start = 0;

	mutex_lock(&qdev->surf_evict_mutex);
again:

	spin_lock(&qdev->surf_id_idr_lock);
	start = qdev->last_alloced_surf_id + 1;
	spin_unlock(&qdev->surf_id_idr_lock);

	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
		void *objptr;
		int surfid = i % qdev->rom->n_surfaces;

		/* this avoids the case where the object is in the
		   idr but has been evicted half way - it makes
		   the idr lookup atomic with the eviction */
		spin_lock(&qdev->surf_id_idr_lock);
		objptr = idr_find(&qdev->surf_id_idr, surfid);
		spin_unlock(&qdev->surf_id_idr_lock);

		if (!objptr)
			continue;

		ret = qxl_reap_surf(qdev, objptr, stall);
		if (ret == 0)
			num_reaped++;
		if (num_reaped >= max_to_reap)
			break;
	}
	if (num_reaped == 0 && stall == false) {
		stall = true;
		goto again;
	}

	mutex_unlock(&qdev->surf_evict_mutex);
	if (num_reaped) {
		usleep_range(500, 1000);
		qxl_queue_garbage_collect(qdev, true);
	}

	return 0;
}