Lines Matching refs:qdev (every line in this file that references qdev, the struct qxl_device instance; the leading numbers are the file's own line numbers)
40 static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
191 qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
197 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
199 return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
203 qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
209 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
211 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
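The two push helpers above differ only in the destination ring; both translate the release BO's offset into a device physical address before pushing. A minimal sketch of the shared shape, assuming a hypothetical common helper (push_release_to_ring is not in the driver; qxl_command, qxl_bo_physical_address and qxl_ring_push are):

/* Hypothetical common helper illustrating the shared pattern of the
 * two push functions above; not part of the driver itself. */
static int push_release_to_ring(struct qxl_device *qdev, struct qxl_ring *ring,
                                struct qxl_release *release, uint32_t type,
                                bool interruptible)
{
        struct qxl_command cmd;

        cmd.type = type;
        /* The device addresses guest memory through memory slots, so
         * the BO offset must become a device physical address. */
        cmd.data = qxl_bo_physical_address(qdev, release->release_bo,
                                           release->release_offset);
        return qxl_ring_push(ring, &cmd, interruptible);
}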
214 bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
216 if (!qxl_check_idle(qdev->release_ring)) {
217 schedule_work(&qdev->gc_work);
219 flush_work(&qdev->gc_work);
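qxl_queue_garbage_collect defers collection to a workqueue when the release ring is not idle; with flush set, the caller blocks until the queued work has run. Reconstructed body, assembled from the matched lines (the return values come from the full source):

bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
        if (!qxl_check_idle(qdev->release_ring)) {
                schedule_work(&qdev->gc_work);  /* run collection asynchronously */
                if (flush)
                        flush_work(&qdev->gc_work);  /* make it synchronous */
                return true;
        }
        return false;
}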
225 int qxl_garbage_collect(struct qxl_device *qdev)
232 while (qxl_ring_pop(qdev->release_ring, &id)) {
235 release = qxl_release_from_id_locked(qdev, id);
239 info = qxl_release_map(qdev, release);
241 qxl_release_unmap(qdev, release, info);
257 qxl_release_free(qdev, release);
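qxl_garbage_collect itself drains the release ring: every popped id is resolved back to a qxl_release, the release's command area is mapped to inspect the command type, and the release is freed. Sketch of the drain loop, with the dispatch on the command type elided (it is in the full source but does not reference qdev, so it is absent from the matches):

/* Paraphrased drain loop; error paths and the switch on info->id
 * (draw, surface, cursor commands) are abbreviated. */
while (qxl_ring_pop(qdev->release_ring, &id)) {
        release = qxl_release_from_id_locked(qdev, id);
        if (release == NULL)
                continue;

        info = qxl_release_map(qdev, release);
        /* ... dispatch on info->id ... */
        qxl_release_unmap(qdev, release, info);

        qxl_release_free(qdev, release);
}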
267 int qxl_alloc_bo_reserved(struct qxl_device *qdev,
275 ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
292 static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
295 long addr = qdev->io_base + port;
298 mutex_lock(&qdev->async_io_mutex);
299 irq_num = atomic_read(&qdev->irq_received_io_cmd);
300 if (qdev->last_sent_io_cmd > irq_num) {
302 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
303 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
305 ret = wait_event_timeout(qdev->io_cmd_event,
306 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
310 irq_num = atomic_read(&qdev->irq_received_io_cmd);
313 qdev->last_sent_io_cmd = irq_num + 1;
315 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
316 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
318 ret = wait_event_timeout(qdev->io_cmd_event,
319 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
323 mutex_unlock(&qdev->async_io_mutex);
327 static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
332 ret = wait_for_io_cmd_user(qdev, val, port, false);
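Lines 292-332 implement the async IO handshake: under async_io_mutex the function first waits out any previously sent command that the IRQ handler has not yet acknowledged (last_sent_io_cmd > irq_received_io_cmd), issues the new command, records it as outstanding, and waits for its acknowledgment; the intr flag selects wait_event_interruptible_timeout over wait_event_timeout, and wait_for_io_cmd (line 327) is the non-interruptible wrapper. Condensed sketch of the non-interruptible path; the outb that sends the command does not reference qdev and is therefore missing from the matches, so it is paraphrased here:

mutex_lock(&qdev->async_io_mutex);
irq_num = atomic_read(&qdev->irq_received_io_cmd);
if (qdev->last_sent_io_cmd > irq_num) {
        /* A previous async command is still unacknowledged: wait up
         * to 5 seconds for its completion IRQ before sending ours. */
        wait_event_timeout(qdev->io_cmd_event,
                atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
        irq_num = atomic_read(&qdev->irq_received_io_cmd);
}
outb(val, qdev->io_base + port);        /* issue the async command */
qdev->last_sent_io_cmd = irq_num + 1;   /* mark it as outstanding */
wait_event_timeout(qdev->io_cmd_event,  /* wait for its IRQ ack */
        atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
mutex_unlock(&qdev->async_io_mutex);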
337 int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
358 mutex_lock(&qdev->update_area_mutex);
359 qdev->ram_header->update_area = *area;
360 qdev->ram_header->update_surface = surface_id;
361 ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
362 mutex_unlock(&qdev->update_area_mutex);
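qxl_io_update_area passes its arguments through the shared ram header: the rectangle and surface id are written under update_area_mutex, then QXL_IO_UPDATE_AREA_ASYNC goes through the interruptible wait path so a signal can abort the wait. A hedged caller sketch, mirroring the full-surface flush that qxl_update_surface (line 557) performs:

/* Flush an entire surface to the host; surf->surf carries the
 * dimensions, as elsewhere in the driver. */
struct qxl_rect rect = {
        .left   = 0,
        .right  = surf->surf.width,
        .top    = 0,
        .bottom = surf->surf.height,
};
int ret = qxl_io_update_area(qdev, surf, &rect);
if (ret == -ERESTARTSYS)
        return ret;     /* interrupted by a signal; caller may retry */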
366 void qxl_io_notify_oom(struct qxl_device *qdev)
368 outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
371 void qxl_io_flush_release(struct qxl_device *qdev)
373 outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
376 void qxl_io_flush_surfaces(struct qxl_device *qdev)
378 wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
381 void qxl_io_destroy_primary(struct qxl_device *qdev)
383 wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
384 qdev->primary_bo->is_primary = false;
385 drm_gem_object_put_unlocked(&qdev->primary_bo->tbo.base);
386 qdev->primary_bo = NULL;
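Two IO flavors appear above: qxl_io_notify_oom and qxl_io_flush_release are fire-and-forget outb writes, while the *_ASYNC ports go through wait_for_io_cmd and block until the device's completion IRQ arrives. qxl_io_destroy_primary additionally drops the GEM reference taken when the primary was created (line 412) and clears the bookkeeping. Side by side, illustratively:

/* Fire-and-forget: no completion IRQ is expected. */
outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);

/* Asynchronous port with completion IRQ: blocks up to the timeout. */
wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);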
389 void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
393 if (WARN_ON(qdev->primary_bo))
396 DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
397 create = &qdev->ram_header->create_surface;
402 create->mem = qxl_bo_physical_address(qdev, bo, 0);
409 wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
410 qdev->primary_bo = bo;
411 qdev->primary_bo->is_primary = true;
412 drm_gem_object_get(&qdev->primary_bo->tbo.base);
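qxl_io_create_primary fills the create_surface slot of the shared ram header and fires QXL_IO_CREATE_PRIMARY_ASYNC; only lines touching qdev are matched above, so the geometry assignments are missing from the listing. Sketch of the full sequence, assuming the parameters come from bo->surf as elsewhere in the driver:

create = &qdev->ram_header->create_surface;
create->format = bo->surf.format;
create->width  = bo->surf.width;
create->height = bo->surf.height;
create->stride = bo->surf.stride;
create->mem    = qxl_bo_physical_address(qdev, bo, 0);

wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);

/* Pin the new primary; the reference is dropped again in
 * qxl_io_destroy_primary (line 385). */
qdev->primary_bo = bo;
qdev->primary_bo->is_primary = true;
drm_gem_object_get(&qdev->primary_bo->tbo.base);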
415 void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
418 wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
421 void qxl_io_reset(struct qxl_device *qdev)
423 outb(0, qdev->io_base + QXL_IO_RESET);
426 void qxl_io_monitors_config(struct qxl_device *qdev)
428 wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
431 int qxl_surface_id_alloc(struct qxl_device *qdev,
439 spin_lock(&qdev->surf_id_idr_lock);
440 idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
441 spin_unlock(&qdev->surf_id_idr_lock);
447 if (handle >= qdev->rom->n_surfaces) {
449 spin_lock(&qdev->surf_id_idr_lock);
450 idr_remove(&qdev->surf_id_idr, handle);
451 spin_unlock(&qdev->surf_id_idr_lock);
452 qxl_reap_surface_id(qdev, 2);
457 spin_lock(&qdev->surf_id_idr_lock);
458 qdev->last_alloced_surf_id = handle;
459 spin_unlock(&qdev->surf_id_idr_lock);
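Surface ids are handed out from an IDR under surf_id_idr_lock; because that lock is a spinlock, idr_alloc must use GFP_NOWAIT. A handle at or beyond the device's rom->n_surfaces limit is immediately returned to the IDR, and qxl_reap_surface_id(qdev, 2) evicts surfaces to make room before the allocation is retried. Sketch of the retry loop (the again: label and the surface_id assignment are from the full source; IDR preloading is elided):

again:
        spin_lock(&qdev->surf_id_idr_lock);
        idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&qdev->surf_id_idr_lock);
        if (idr_ret < 0)
                return idr_ret;
        handle = idr_ret;

        if (handle >= qdev->rom->n_surfaces) {
                /* Out of device surfaces: free the handle, evict, retry. */
                spin_lock(&qdev->surf_id_idr_lock);
                idr_remove(&qdev->surf_id_idr, handle);
                spin_unlock(&qdev->surf_id_idr_lock);
                qxl_reap_surface_id(qdev, 2);
                goto again;
        }

        surf->surface_id = handle;
        spin_lock(&qdev->surf_id_idr_lock);
        qdev->last_alloced_surf_id = handle;
        spin_unlock(&qdev->surf_id_idr_lock);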
463 void qxl_surface_id_dealloc(struct qxl_device *qdev,
466 spin_lock(&qdev->surf_id_idr_lock);
467 idr_remove(&qdev->surf_id_idr, surface_id);
468 spin_unlock(&qdev->surf_id_idr_lock);
471 int qxl_hw_surface_alloc(struct qxl_device *qdev,
481 ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
491 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
498 cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
500 qxl_release_unmap(qdev, release, &cmd->release_info);
507 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
511 spin_lock(&qdev->surf_id_idr_lock);
512 idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
513 spin_unlock(&qdev->surf_id_idr_lock);
517 int qxl_hw_surface_dealloc(struct qxl_device *qdev,
528 ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
536 spin_lock(&qdev->surf_id_idr_lock);
537 idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
538 spin_unlock(&qdev->surf_id_idr_lock);
545 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
548 qxl_release_unmap(qdev, release, &cmd->release_info);
550 qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
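Hardware surface create and destroy share one command shape: reserve a release of the matching QXL_SURFACE_CMD_* type, map it, fill the struct qxl_surface_cmd, unmap, and push it onto the command ring as QXL_CMD_SURFACE. The IDR entry is flipped from NULL to the BO after the create is queued (line 512) and back to NULL before the destroy is pushed (line 537), so lookups only ever see live surfaces. Hedged reconstruction of the create path; fields beyond those in the matches are abbreviated:

ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
                                         NULL, &release);
if (ret)
        return ret;

cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
/* ... format/width/height/stride taken from surf->surf ... */
cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
cmd->surface_id = surf->surface_id;
qxl_release_unmap(qdev, release, &cmd->release_info);

qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

/* Publish the BO under its id so lookups and the GC can find it. */
spin_lock(&qdev->surf_id_idr_lock);
idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
spin_unlock(&qdev->surf_id_idr_lock);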
557 static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
569 ret = qxl_io_update_area(qdev, surf, &rect);
575 static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
579 qxl_update_surface(qdev, surf);
582 qxl_hw_surface_dealloc(qdev, surf);
585 void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
587 mutex_lock(&qdev->surf_evict_mutex);
588 qxl_surface_evict_locked(qdev, surf, do_update_area);
589 mutex_unlock(&qdev->surf_evict_mutex);
592 static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
601 mutex_unlock(&qdev->surf_evict_mutex);
606 mutex_lock(&qdev->surf_evict_mutex);
612 qxl_surface_evict_locked(qdev, surf, true);
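qxl_surface_evict is a plain lock wrapper around the _locked variant, but qxl_reap_surf cannot hold surf_evict_mutex across a potentially long stall on the host, so it drops and re-takes the mutex around the wait (lines 601 and 606). The idiom, paraphrased:

/* Drop-lock-to-stall idiom from qxl_reap_surf: release the eviction
 * mutex before a possibly long wait, re-acquire it afterwards. */
mutex_unlock(&qdev->surf_evict_mutex);
/* ... reserve the BO / wait for it to become idle ... */
mutex_lock(&qdev->surf_evict_mutex);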
617 static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
624 mutex_lock(&qdev->surf_evict_mutex);
627 spin_lock(&qdev->surf_id_idr_lock);
628 start = qdev->last_alloced_surf_id + 1;
629 spin_unlock(&qdev->surf_id_idr_lock);
631 for (i = start; i < start + qdev->rom->n_surfaces; i++) {
633 int surfid = i % qdev->rom->n_surfaces;
638 spin_lock(&qdev->surf_id_idr_lock);
639 objptr = idr_find(&qdev->surf_id_idr, surfid);
640 spin_unlock(&qdev->surf_id_idr_lock);
645 qdev, objptr, stall);
656 mutex_unlock(&qdev->surf_evict_mutex);
659 qxl_queue_garbage_collect(qdev, true);
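qxl_reap_surface_id scans the id space cyclically, starting one past last_alloced_surf_id so the oldest surviving allocations are tried first; each idr_find runs under the spinlock, the found BO is handed to qxl_reap_surf (line 645), and a synchronous garbage collect (line 659) then recycles the releases freed by the evictions. Shape of the cyclic scan, paraphrased:

for (i = start; i < start + qdev->rom->n_surfaces; i++) {
        void *objptr;
        int surfid = i % qdev->rom->n_surfaces;  /* wrap around the table */

        /* Take the lock so the lookup is atomic with eviction. */
        spin_lock(&qdev->surf_id_idr_lock);
        objptr = idr_find(&qdev->surf_id_idr, surfid);
        spin_unlock(&qdev->surf_id_idr_lock);
        if (!objptr)
                continue;

        /* ... qxl_reap_surf(qdev, objptr, stall); stop once
         * max_to_reap surfaces have been evicted ... */
}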