Lines Matching defs:qdev (declarations and uses of struct qxl_device *qdev in the QXL release code)
65 struct qxl_device *qdev;
71 qdev = container_of(fence->lock, struct qxl_device, release_lock);
81 qxl_io_notify_oom(qdev);
84 if (!qxl_queue_garbage_collect(qdev, true))
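The three uses above come from the fence wait path. The fence is initialised with the address of qdev->release_lock (see the dma_fence_init line at the end of this listing), so container_of() can walk from fence->lock back to the owning qxl_device; the waiter then nudges the host and drains the release ring. A minimal sketch of that pattern, assuming an illustrative retry loop and function name (the driver's actual control flow around these calls is not part of the listed lines):

#include <linux/dma-fence.h>
#include "qxl_drv.h"        /* struct qxl_device, qxl_io_notify_oom(), ... */

static long example_fence_wait(struct dma_fence *fence, bool intr,
                               signed long timeout)
{
        struct qxl_device *qdev;

        /* fence->lock points at qdev->release_lock, so walk back to
         * the device that owns the fence. */
        qdev = container_of(fence->lock, struct qxl_device, release_lock);

        while (!dma_fence_is_signaled(fence)) {
                /* Nudge the host to flush pending releases... */
                qxl_io_notify_oom(qdev);
                /* ...and drain the release ring; a false return means
                 * the ring was already idle. */
                if (!qxl_queue_garbage_collect(qdev, true))
                        break;
        }

        return dma_fence_is_signaled(fence) ? timeout : 0;
}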
129 qxl_release_alloc(struct qxl_device *qdev, int type,
148 spin_lock(&qdev->release_idr_lock);
149 handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
150 release->base.seqno = ++qdev->release_seqno;
151 spin_unlock(&qdev->release_idr_lock);
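Lines 148-151 are the driver's handle-allocation idiom: the IDR is guarded by a spinlock, so the insertion uses GFP_NOWAIT (no sleeping allocator while the lock is held), and the release is stamped with a monotonically increasing fence seqno in the same critical section. A condensed sketch, assuming the usual idr_preload() companion that lets GFP_NOWAIT succeed (the preload calls are not among the listed lines):

#include <linux/idr.h>
#include <linux/spinlock.h>

static int example_alloc_handle(struct qxl_device *qdev,
                                struct qxl_release *release)
{
        int handle;

        idr_preload(GFP_KERNEL);        /* may sleep, done before locking */
        spin_lock(&qdev->release_idr_lock);
        handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
        release->base.seqno = ++qdev->release_seqno;
        spin_unlock(&qdev->release_idr_lock);
        idr_preload_end();

        if (handle < 0)
                return handle;          /* -ENOMEM or -ENOSPC from the IDR */
        release->id = handle;
        return handle;
}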
182 qxl_release_free(struct qxl_device *qdev,
188 qxl_surface_id_dealloc(qdev, release->surface_release_id);
190 spin_lock(&qdev->release_idr_lock);
191 idr_remove(&qdev->release_idr, release->id);
192 spin_unlock(&qdev->release_idr_lock);
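qxl_release_free mirrors that allocation: a surface release first hands its surface id back, then the handle comes out of the IDR under the same spinlock. A sketch of just these steps (the function's fence and buffer-object cleanup is omitted, and the guard on surface_release_id is an assumption):

static void example_free_handle(struct qxl_device *qdev,
                                struct qxl_release *release)
{
        /* Assumed guard: only surface releases carry a surface id. */
        if (release->surface_release_id)
                qxl_surface_id_dealloc(qdev, release->surface_release_id);

        spin_lock(&qdev->release_idr_lock);
        idr_remove(&qdev->release_idr, release->id);
        spin_unlock(&qdev->release_idr_lock);
}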
206 static int qxl_release_bo_alloc(struct qxl_device *qdev,
210 return qxl_bo_create(qdev, PAGE_SIZE, false, true,
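Each release BO is a single page, which ties together the two per-type tables used below: the slot count for a release type follows from how many fixed-size slots fit in PAGE_SIZE. A one-line illustration of that inferred relation (the table names reappear at lines 354 and 371; the tail of the qxl_bo_create() call is cut off by the listing and not reproduced here):

static int example_slots_per_bo(int release_size)
{
        /* Inferred relation: releases_per_bo[i] is simply how many
         * release_size_per_bo[i]-byte slots fit in one page. */
        return PAGE_SIZE / release_size;
}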
294 int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
305 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
315 info = qxl_release_map(qdev, *release);
317 qxl_release_unmap(qdev, *release, info);
321 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
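In qxl_alloc_surface_release_reserved the fresh IDR handle is written into the command's shared info block through a short map/unmap window (lines 315-317); any other surface command falls through to the generic allocator at line 321 with a qxl_surface_cmd-sized slot. The listing omits the write between map and unmap; the sketch below reconstructs it, and the stamped field is an assumption based on the surrounding calls:

static void example_stamp_id(struct qxl_device *qdev,
                             struct qxl_release *release, int idr_ret)
{
        union qxl_release_info *info;

        info = qxl_release_map(qdev, release);
        if (!info)
                return;
        info->id = idr_ret;     /* assumed: hand the handle to the device */
        qxl_release_unmap(qdev, release, info);
}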
325 int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
346 idr_ret = qxl_release_alloc(qdev, type, release);
353 mutex_lock(&qdev->release_mutex);
354 if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
355 qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
356 qdev->current_release_bo_offset[cur_idx] = 0;
357 qdev->current_release_bo[cur_idx] = NULL;
359 if (!qdev->current_release_bo[cur_idx]) {
360 ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
362 mutex_unlock(&qdev->release_mutex);
363 qxl_release_free(qdev, *release);
368 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
371 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
372 qdev->current_release_bo_offset[cur_idx]++;
377 mutex_unlock(&qdev->release_mutex);
382 qxl_release_free(qdev, *release);
386 info = qxl_release_map(qdev, *release);
388 qxl_release_unmap(qdev, *release, info);
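qxl_alloc_release_reserved sub-allocates release slots out of those page-sized BOs: under release_mutex, the current BO for the release type (cur_idx) is retired once its releases_per_bo[] slots are used up, a replacement is created on demand via qxl_release_bo_alloc(), and the slot's byte offset is computed from release_size_per_bo[]. A condensed sketch of the reservation step; the per-type tables, the list bookkeeping, and the final map/unmap stamping (lines 386-388) are elided:

static int example_reserve_slot(struct qxl_device *qdev,
                                struct qxl_release *release, int cur_idx,
                                const int releases_per_bo[],
                                const int release_size_per_bo[])
{
        struct qxl_bo *bo;
        int ret;

        mutex_lock(&qdev->release_mutex);

        /* Current BO exhausted?  Retire it and start from offset 0. */
        if (qdev->current_release_bo_offset[cur_idx] + 1 >=
            releases_per_bo[cur_idx]) {
                qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
                qdev->current_release_bo_offset[cur_idx] = 0;
                qdev->current_release_bo[cur_idx] = NULL;
        }
        if (!qdev->current_release_bo[cur_idx]) {
                ret = qxl_release_bo_alloc(qdev,
                                           &qdev->current_release_bo[cur_idx]);
                if (ret) {
                        mutex_unlock(&qdev->release_mutex);
                        return ret;
                }
        }

        bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

        /* Byte offset of this release's slot inside the shared BO. */
        release->release_offset = qdev->current_release_bo_offset[cur_idx] *
                                  release_size_per_bo[cur_idx];
        qdev->current_release_bo_offset[cur_idx]++;

        mutex_unlock(&qdev->release_mutex);

        /* The driver goes on to attach bo to the release's BO list and
         * drops this reference afterwards; that bookkeeping is omitted. */
        qxl_bo_unref(&bo);
        return 0;
}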
393 struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
398 spin_lock(&qdev->release_idr_lock);
399 release = idr_find(&qdev->release_idr, id);
400 spin_unlock(&qdev->release_idr_lock);
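Lookup is the read side of the same IDR, and essentially the whole of qxl_release_from_id_locked() is visible above: idr_find() under the spinlock, with NULL for an unknown id. For completeness, a sketch with the return made explicit:

static struct qxl_release *example_lookup(struct qxl_device *qdev,
                                          uint64_t id)
{
        struct qxl_release *release;

        spin_lock(&qdev->release_idr_lock);
        release = idr_find(&qdev->release_idr, id);
        spin_unlock(&qdev->release_idr_lock);

        return release;         /* NULL if the id was never allocated */
}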
409 union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
416 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
423 void qxl_release_unmap(struct qxl_device *qdev,
431 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
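qxl_release_map()/qxl_release_unmap() bracket short atomic mappings of the page holding a release's info block: release_offset & PAGE_MASK is the page-aligned base handed to the kmap helper, and the low bits locate the struct inside that page. A sketch of the pair, assuming that in-page adjustment; in the driver the BO comes from the release's own bookkeeping, so passing it as a parameter here is a simplification:

static union qxl_release_info *example_map(struct qxl_device *qdev,
                                           struct qxl_release *release,
                                           struct qxl_bo *bo)
{
        void *ptr;

        /* Map the page that contains this release's slot... */
        ptr = qxl_bo_kmap_atomic_page(qdev, bo,
                                      release->release_offset & PAGE_MASK);
        if (!ptr)
                return NULL;
        /* ...then add the within-page remainder of the offset. */
        return ptr + (release->release_offset & ~PAGE_MASK);
}

static void example_unmap(struct qxl_device *qdev,
                          struct qxl_release *release,
                          struct qxl_bo *bo, union qxl_release_info *info)
{
        /* Undo the in-page adjustment to recover the mapping base. */
        void *ptr = (void *)info - (release->release_offset & ~PAGE_MASK);

        qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}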
439 struct qxl_device *qdev;
448 qdev = container_of(bdev, struct qxl_device, mman.bdev);
454 dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
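Finally, the dma_fence_init() call at line 454 is what makes the container_of() recovery at line 71 work: dma_fence_init() stores the spinlock pointer it is given in fence->lock, and here that pointer is &qdev->release_lock. The listing truncates the call's trailing arguments, so the sketch below marks the fence context as a placeholder and the seqno as an assumption carried over from line 150:

#include <linux/dma-fence.h>

static void example_fence_setup(struct qxl_device *qdev,
                                struct qxl_release *release,
                                u64 example_context)
{
        /* Passing &qdev->release_lock ties every QXL fence back to its
         * device, enabling the container_of() walk in the wait path. */
        dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
                       example_context,         /* placeholder */
                       release->base.seqno);    /* assumed, from line 150 */
}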